server/device.c

/*
 * Server-side device support
 *
 * Copyright (C) 2007 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"

#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"
#include "winternl.h"
#include "ddk/wdm.h"
#include "wine/rbtree.h"

#include "object.h"
#include "file.h"
#include "handle.h"
#include "request.h"
#include "process.h"

/* IRP object */

struct irp_call
{
    struct object          obj;           /* object header */
    struct list            dev_entry;     /* entry in device queue */
    struct list            mgr_entry;     /* entry in manager queue */
    struct device_file    *file;          /* file containing this irp */
    struct thread         *thread;        /* thread that queued the irp */
    struct async          *async;         /* pending async op */
    irp_params_t           params;        /* irp parameters */
    struct iosb           *iosb;          /* I/O status block */
    int                    canceled;      /* the call was canceled */
    client_ptr_t           user_ptr;      /* client side pointer */
};
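
/* Each irp_call represents a single request (create, close, read, write,
 * ioctl, cancel, free, ...) queued to a device manager for the client-side
 * driver to process.  Requests tied to a device file also sit on that file's
 * queue until a result is set for them. */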

static void irp_call_dump( struct object *obj, int verbose );
static void irp_call_destroy( struct object *obj );

static const struct object_ops irp_call_ops =
{
    sizeof(struct irp_call),          /* size */
    &no_type,                         /* type */
    irp_call_dump,                    /* dump */
    no_add_queue,                     /* add_queue */
    NULL,                             /* remove_queue */
    NULL,                             /* signaled */
    NULL,                             /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    default_map_access,               /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_get_full_name,                 /* get_full_name */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    no_kernel_obj_list,               /* get_kernel_obj_list */
    no_close_handle,                  /* close_handle */
    irp_call_destroy                  /* destroy */
};


/* device manager (a list of devices managed by the same client process) */

struct device_manager
{
    struct object          obj;            /* object header */
    struct list            devices;        /* list of devices */
    struct list            requests;       /* list of pending irps across all devices */
    struct irp_call       *current_call;   /* call currently executed on client side */
    struct wine_rb_tree    kernel_objects; /* map of objects that have client side pointer associated */
};
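
/* The manager object is signaled whenever its request queue is non-empty
 * (see device_manager_signaled and the wake_up in add_irp_to_queue), so the
 * client process can wait on it and then drain the queue with
 * get_next_device_request.  The kernel_objects tree maps client-side
 * pointers back to the corresponding server objects. */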

static void device_manager_dump( struct object *obj, int verbose );
static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry );
static void device_manager_destroy( struct object *obj );

static const struct object_ops device_manager_ops =
{
    sizeof(struct device_manager),    /* size */
    &no_type,                         /* type */
    device_manager_dump,              /* dump */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    device_manager_signaled,          /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    default_map_access,               /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_get_full_name,                 /* get_full_name */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    no_kernel_obj_list,               /* get_kernel_obj_list */
    no_close_handle,                  /* close_handle */
    device_manager_destroy            /* destroy */
};


/* device (a single device object) */

static const WCHAR device_name[] = {'D','e','v','i','c','e'};

struct type_descr device_type =
{
    { device_name, sizeof(device_name) },   /* name */
    FILE_ALL_ACCESS,                        /* valid_access */
    {                                       /* mapping */
        FILE_GENERIC_READ,
        FILE_GENERIC_WRITE,
        FILE_GENERIC_EXECUTE,
        FILE_ALL_ACCESS
    },
};

struct device
{
    struct object          obj;             /* object header */
    struct device_manager *manager;         /* manager for this device (or NULL if deleted) */
    char                  *unix_path;       /* path to unix device if any */
    struct list            kernel_object;   /* list of kernel object pointers */
    struct list            entry;           /* entry in device manager list */
    struct list            files;           /* list of open files */
};

static void device_dump( struct object *obj, int verbose );
static void device_destroy( struct object *obj );
static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options );
static struct list *device_get_kernel_obj_list( struct object *obj );

static const struct object_ops device_ops =
{
    sizeof(struct device),            /* size */
    &device_type,                     /* type */
    device_dump,                      /* dump */
    no_add_queue,                     /* add_queue */
    NULL,                             /* remove_queue */
    NULL,                             /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    default_map_access,               /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    default_get_full_name,            /* get_full_name */
    no_lookup_name,                   /* lookup_name */
    directory_link_name,              /* link_name */
    default_unlink_name,              /* unlink_name */
    device_open_file,                 /* open_file */
    device_get_kernel_obj_list,       /* get_kernel_obj_list */
    no_close_handle,                  /* close_handle */
    device_destroy                    /* destroy */
};


/* device file (an open file handle to a device) */

struct device_file
{
    struct object          obj;           /* object header */
    struct device         *device;        /* device for this file */
    struct fd             *fd;            /* file descriptor for irp */
    struct list            kernel_object; /* list of kernel object pointers */
    int                    closed;        /* closed file flag */
    struct list            entry;         /* entry in device list */
    struct list            requests;      /* list of pending irp requests */
};

static void device_file_dump( struct object *obj, int verbose );
static struct fd *device_file_get_fd( struct object *obj );
static WCHAR *device_file_get_full_name( struct object *obj, data_size_t *len );
static struct list *device_file_get_kernel_obj_list( struct object *obj );
static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
static void device_file_destroy( struct object *obj );
static enum server_fd_type device_file_get_fd_type( struct fd *fd );
static void device_file_read( struct fd *fd, struct async *async, file_pos_t pos );
static void device_file_write( struct fd *fd, struct async *async, file_pos_t pos );
static void device_file_flush( struct fd *fd, struct async *async );
static void device_file_ioctl( struct fd *fd, ioctl_code_t code, struct async *async );
static void device_file_cancel_async( struct fd *fd, struct async *async );
static void device_file_get_volume_info( struct fd *fd, struct async *async, unsigned int info_class );

static const struct object_ops device_file_ops =
{
    sizeof(struct device_file),       /* size */
    &file_type,                       /* type */
    device_file_dump,                 /* dump */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    default_fd_signaled,              /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    device_file_get_fd,               /* get_fd */
    default_map_access,               /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    device_file_get_full_name,        /* get_full_name */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    device_file_get_kernel_obj_list,  /* get_kernel_obj_list */
    device_file_close_handle,         /* close_handle */
    device_file_destroy               /* destroy */
};

static const struct fd_ops device_file_fd_ops =
{
    default_fd_get_poll_events,       /* get_poll_events */
    default_poll_event,               /* poll_event */
    device_file_get_fd_type,          /* get_fd_type */
    device_file_read,                 /* read */
    device_file_write,                /* write */
    device_file_flush,                /* flush */
    default_fd_get_file_info,         /* get_file_info */
    device_file_get_volume_info,      /* get_volume_info */
    device_file_ioctl,                /* ioctl */
    device_file_cancel_async,         /* cancel_async */
    default_fd_queue_async,           /* queue_async */
    default_fd_reselect_async,        /* reselect_async */
};


struct list *no_kernel_obj_list( struct object *obj )
{
    return NULL;
}

struct kernel_object
{
    struct device_manager *manager;
    client_ptr_t           user_ptr;
    struct object         *object;
    int                    owned;
    struct list            list_entry;
    struct wine_rb_entry   rb_entry;
};
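
/* Each kernel_object entry ties one server object to the pointer the client
 * uses for it.  Entries are linked both into the object's own list (returned
 * by get_kernel_obj_list) and into the owning manager's rb-tree keyed on
 * user_ptr, so the mapping can be looked up in either direction. */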

static int compare_kernel_object( const void *k, const struct wine_rb_entry *entry )
{
    struct kernel_object *ptr = WINE_RB_ENTRY_VALUE( entry, struct kernel_object, rb_entry );
    return memcmp( k, &ptr->user_ptr, sizeof(client_ptr_t) );
}

static struct kernel_object *kernel_object_from_obj( struct device_manager *manager, struct object *obj )
{
    struct kernel_object *kernel_object;
    struct list *list;

    if (!(list = obj->ops->get_kernel_obj_list( obj ))) return NULL;
    LIST_FOR_EACH_ENTRY( kernel_object, list, struct kernel_object, list_entry )
    {
        if (kernel_object->manager != manager) continue;
        return kernel_object;
    }
    return NULL;
}

static client_ptr_t get_kernel_object_ptr( struct device_manager *manager, struct object *obj )
{
    struct kernel_object *kernel_object = kernel_object_from_obj( manager, obj );
    return kernel_object ? kernel_object->user_ptr : 0;
}

static struct kernel_object *set_kernel_object( struct device_manager *manager, struct object *obj, client_ptr_t user_ptr )
{
    struct kernel_object *kernel_object;
    struct list *list;

    if (!(list = obj->ops->get_kernel_obj_list( obj ))) return NULL;

    if (!(kernel_object = malloc( sizeof(*kernel_object) ))) return NULL;
    kernel_object->manager  = manager;
    kernel_object->user_ptr = user_ptr;
    kernel_object->object   = obj;
    kernel_object->owned    = 0;

    if (wine_rb_put( &manager->kernel_objects, &user_ptr, &kernel_object->rb_entry ))
    {
        /* kernel_object pointer already set */
        free( kernel_object );
        return NULL;
    }

    list_add_head( list, &kernel_object->list_entry );
    return kernel_object;
}

static struct kernel_object *kernel_object_from_ptr( struct device_manager *manager, client_ptr_t client_ptr )
{
    struct wine_rb_entry *entry = wine_rb_get( &manager->kernel_objects, &client_ptr );
    return entry ? WINE_RB_ENTRY_VALUE( entry, struct kernel_object, rb_entry ) : NULL;
}

static void grab_kernel_object( struct kernel_object *ptr )
{
    if (!ptr->owned)
    {
        grab_object( ptr->object );
        ptr->owned = 1;
    }
}

static void irp_call_dump( struct object *obj, int verbose )
{
    struct irp_call *irp = (struct irp_call *)obj;
    fprintf( stderr, "IRP call file=%p\n", irp->file );
}

static void irp_call_destroy( struct object *obj )
{
    struct irp_call *irp = (struct irp_call *)obj;

    if (irp->async)
    {
        async_terminate( irp->async, STATUS_CANCELLED );
        release_object( irp->async );
    }
    if (irp->iosb) release_object( irp->iosb );
    if (irp->file) release_object( irp->file );
    if (irp->thread) release_object( irp->thread );
}

static struct irp_call *create_irp( struct device_file *file, const irp_params_t *params, struct async *async )
{
    struct irp_call *irp;

    if (file && !file->device->manager)  /* it has been deleted */
    {
        set_error( STATUS_FILE_DELETED );
        return NULL;
    }

    if ((irp = alloc_object( &irp_call_ops )))
    {
        irp->file     = file ? (struct device_file *)grab_object( file ) : NULL;
        irp->thread   = NULL;
        irp->async    = NULL;
        irp->params   = *params;
        irp->iosb     = NULL;
        irp->canceled = 0;
        irp->user_ptr = 0;

        if (async) irp->iosb = async_get_iosb( async );
    }
    return irp;
}

static void set_irp_result( struct irp_call *irp, unsigned int status,
                            const void *out_data, data_size_t out_size, data_size_t result )
{
    struct device_file *file = irp->file;

    if (!file) return;  /* already finished */

    /* remove it from the device queue */
    list_remove( &irp->dev_entry );
    irp->file = NULL;
    if (irp->async)
    {
        out_size = min( irp->iosb->out_size, out_size );
        async_request_complete_alloc( irp->async, status, result, out_size, out_data );
        release_object( irp->async );
        irp->async = NULL;
    }

    release_object( irp );  /* no longer on the device queue */
    release_object( file );
}


static void device_dump( struct object *obj, int verbose )
{
    fputs( "Device\n", stderr );
}

static void device_destroy( struct object *obj )
{
    struct device *device = (struct device *)obj;

    assert( list_empty( &device->files ));

    free( device->unix_path );
    if (device->manager) list_remove( &device->entry );
}

static void add_irp_to_queue( struct device_manager *manager, struct irp_call *irp, struct thread *thread )
{
    grab_object( irp );  /* grab reference for queued irp */
    irp->thread = thread ? (struct thread *)grab_object( thread ) : NULL;
    if (irp->file) list_add_tail( &irp->file->requests, &irp->dev_entry );
    list_add_tail( &manager->requests, &irp->mgr_entry );
    if (list_head( &manager->requests ) == &irp->mgr_entry) wake_up( &manager->obj, 0 );  /* first one */
}
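
/* Opening a handle to a device allocates a device_file; when the device has a
 * manager, an IRP_CALL_CREATE irp is also queued so the client driver can set
 * up its own file object for the new handle. */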
static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options )
{
    struct device *device = (struct device *)obj;
    struct device_file *file;
    struct unicode_str nt_name;

    if (!(file = alloc_object( &device_file_ops ))) return NULL;

    file->device = (struct device *)grab_object( device );
    file->closed = 0;
    list_init( &file->kernel_object );
    list_init( &file->requests );
    list_add_tail( &device->files, &file->entry );
    if (device->unix_path)
    {
        mode_t mode = 0666;
        access = file->obj.ops->map_access( &file->obj, access );
        nt_name.str = device->obj.ops->get_full_name( &device->obj, &nt_name.len );
        file->fd = open_fd( NULL, device->unix_path, nt_name, O_NONBLOCK, &mode, access, sharing, options );
        if (file->fd) set_fd_user( file->fd, &device_file_fd_ops, &file->obj );
    }
    else file->fd = alloc_pseudo_fd( &device_file_fd_ops, &file->obj, options );

    if (!file->fd)
    {
        release_object( file );
        return NULL;
    }

    allow_fd_caching( file->fd );

    if (device->manager)
    {
        struct irp_call *irp;
        irp_params_t params;

        memset( &params, 0, sizeof(params) );
        params.create.type    = IRP_CALL_CREATE;
        params.create.access  = access;
        params.create.sharing = sharing;
        params.create.options = options;
        params.create.device  = get_kernel_object_ptr( device->manager, &device->obj );

        if ((irp = create_irp( file, &params, NULL )))
        {
            add_irp_to_queue( device->manager, irp, current );
            release_object( irp );
        }
    }
    return &file->obj;
}

static struct list *device_get_kernel_obj_list( struct object *obj )
{
    struct device *device = (struct device *)obj;
    return &device->kernel_object;
}

static void device_file_dump( struct object *obj, int verbose )
{
    struct device_file *file = (struct device_file *)obj;

    fprintf( stderr, "File on device %p\n", file->device );
}

static struct fd *device_file_get_fd( struct object *obj )
{
    struct device_file *file = (struct device_file *)obj;

    return (struct fd *)grab_object( file->fd );
}

static WCHAR *device_file_get_full_name( struct object *obj, data_size_t *len )
{
    struct device_file *file = (struct device_file *)obj;
    return file->device->obj.ops->get_full_name( &file->device->obj, len );
}

static struct list *device_file_get_kernel_obj_list( struct object *obj )
{
    struct device_file *file = (struct device_file *)obj;
    return &file->kernel_object;
}

static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
{
    struct device_file *file = (struct device_file *)obj;

    if (!file->closed && file->device->manager && obj->handle_count == 1)  /* last handle */
    {
        struct irp_call *irp;
        irp_params_t params;

        file->closed = 1;
        memset( &params, 0, sizeof(params) );
        params.close.type = IRP_CALL_CLOSE;

        if ((irp = create_irp( file, &params, NULL )))
        {
            add_irp_to_queue( file->device->manager, irp, current );
            release_object( irp );
        }
    }
    return 1;
}

static void device_file_destroy( struct object *obj )
{
    struct device_file *file = (struct device_file *)obj;
    struct irp_call *irp, *next;

    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        list_remove( &irp->dev_entry );
        release_object( irp );  /* no longer on the device queue */
    }
    if (file->fd) release_object( file->fd );
    list_remove( &file->entry );
    release_object( file->device );
}

static int fill_irp_params( struct device_manager *manager, struct irp_call *irp, irp_params_t *params )
{
    switch (irp->params.type)
    {
    case IRP_CALL_NONE:
    case IRP_CALL_FREE:
    case IRP_CALL_CANCEL:
        break;
    case IRP_CALL_CREATE:
        irp->params.create.file = alloc_handle( current->process, irp->file,
                                                irp->params.create.access, 0 );
        if (!irp->params.create.file) return 0;
        break;
    case IRP_CALL_CLOSE:
        irp->params.close.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_READ:
        irp->params.read.file     = get_kernel_object_ptr( manager, &irp->file->obj );
        irp->params.read.out_size = irp->iosb->out_size;
        break;
    case IRP_CALL_WRITE:
        irp->params.write.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_FLUSH:
        irp->params.flush.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_IOCTL:
        irp->params.ioctl.file     = get_kernel_object_ptr( manager, &irp->file->obj );
        irp->params.ioctl.out_size = irp->iosb->out_size;
        break;
    case IRP_CALL_VOLUME:
        irp->params.volume.file     = get_kernel_object_ptr( manager, &irp->file->obj );
        irp->params.volume.out_size = irp->iosb->out_size;
        break;
    }

    *params = irp->params;
    return 1;
}

static void free_irp_params( struct irp_call *irp )
{
    switch (irp->params.type)
    {
    case IRP_CALL_CREATE:
        close_handle( current->process, irp->params.create.file );
        break;
    default:
        break;
    }
}
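
/* All regular I/O on a device file (read, write, flush, ioctl, volume info) is
 * funneled through queue_irp() below: the async representing the client I/O is
 * attached to a new irp, the irp is queued to the device's manager, and the
 * async status stays unknown until the driver reports it back through
 * get_next_device_request / set_irp_result. */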
/* queue an irp to the device */
static void queue_irp( struct device_file *file, const irp_params_t *params, struct async *async )
{
    struct irp_call *irp = create_irp( file, params, async );
    if (!irp) return;

    fd_queue_async( file->fd, async, ASYNC_TYPE_WAIT );
    irp->async = (struct async *)grab_object( async );
    add_irp_to_queue( file->device->manager, irp, current );
    release_object( irp );
    async_set_unknown_status( async );
}

static enum server_fd_type device_file_get_fd_type( struct fd *fd )
{
    return FD_TYPE_DEVICE;
}

static void device_file_get_volume_info( struct fd *fd, struct async *async, unsigned int info_class )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.volume.type = IRP_CALL_VOLUME;
    params.volume.info_class = info_class;
    queue_irp( file, &params, async );
}

static void device_file_read( struct fd *fd, struct async *async, file_pos_t pos )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.read.type = IRP_CALL_READ;
    params.read.key  = 0;
    params.read.pos  = pos;
    queue_irp( file, &params, async );
}

static void device_file_write( struct fd *fd, struct async *async, file_pos_t pos )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.write.type = IRP_CALL_WRITE;
    params.write.key  = 0;
    params.write.pos  = pos;
    queue_irp( file, &params, async );
}

static void device_file_flush( struct fd *fd, struct async *async )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.flush.type = IRP_CALL_FLUSH;
    queue_irp( file, &params, async );
}

static void device_file_ioctl( struct fd *fd, ioctl_code_t code, struct async *async )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.ioctl.type = IRP_CALL_IOCTL;
    params.ioctl.code = code;
    queue_irp( file, &params, async );
}
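
/* Cancellation is also delivered as an irp: once the client has told us its
 * pointer for the irp being dispatched (user_ptr), cancel_irp_call() queues an
 * IRP_CALL_CANCEL carrying that pointer so the driver can cancel the matching
 * IRP on its side. */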
static void cancel_irp_call( struct irp_call *irp )
{
    struct irp_call *cancel_irp;
    irp_params_t params;

    irp->canceled = 1;
    if (!irp->user_ptr || !irp->file || !irp->file->device->manager) return;

    memset( &params, 0, sizeof(params) );
    params.cancel.type = IRP_CALL_CANCEL;
    params.cancel.irp  = irp->user_ptr;

    if ((cancel_irp = create_irp( NULL, &params, NULL )))
    {
        add_irp_to_queue( irp->file->device->manager, cancel_irp, NULL );
        release_object( cancel_irp );
    }
}

static void device_file_cancel_async( struct fd *fd, struct async *async )
{
    struct device_file *file = get_fd_user( fd );
    struct irp_call *irp;

    LIST_FOR_EACH_ENTRY( irp, &file->requests, struct irp_call, dev_entry )
    {
        if (irp->async == async)
        {
            cancel_irp_call( irp );
            return;
        }
    }
}

static struct device *create_device( struct object *root, const struct unicode_str *name,
                                     struct device_manager *manager )
{
    struct device *device;

    if ((device = create_named_object( root, &device_ops, name, 0, NULL )))
    {
        device->unix_path = NULL;
        device->manager = manager;
        grab_object( device );
        list_add_tail( &manager->devices, &device->entry );
        list_init( &device->kernel_object );
        list_init( &device->files );
    }
    return device;
}

struct object *create_unix_device( struct object *root, const struct unicode_str *name,
                                   unsigned int attr, const struct security_descriptor *sd,
                                   const char *unix_path )
{
    struct device *device;

    if ((device = create_named_object( root, &device_ops, name, attr, sd )))
    {
        device->unix_path = strdup( unix_path );
        device->manager = NULL;  /* no manager, requests go straight to the Unix device */
        list_init( &device->kernel_object );
        list_init( &device->files );
    }
    return &device->obj;
}


/* terminate requests when the underlying device is deleted */
static void delete_file( struct device_file *file )
{
    struct irp_call *irp, *next;

    /* the pending requests may be the only thing holding a reference to the file */
    grab_object( file );

    /* terminate all pending requests */
    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        list_remove( &irp->mgr_entry );
        set_irp_result( irp, STATUS_FILE_DELETED, NULL, 0, 0 );
    }

    release_object( file );
}

static void delete_device( struct device *device )
{
    struct device_file *file, *next;

    if (!device->manager) return;  /* already deleted */

    LIST_FOR_EACH_ENTRY_SAFE( file, next, &device->files, struct device_file, entry )
        delete_file( file );

    unlink_named_object( &device->obj );
    list_remove( &device->entry );
    device->manager = NULL;
    release_object( device );
}


static void device_manager_dump( struct object *obj, int verbose )
{
    fprintf( stderr, "Device manager\n" );
}

static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry )
{
    struct device_manager *manager = (struct device_manager *)obj;

    return !list_empty( &manager->requests );
}

static void device_manager_destroy( struct object *obj )
{
    struct device_manager *manager = (struct device_manager *)obj;
    struct kernel_object *kernel_object;
    struct list *ptr;

    if (manager->current_call)
    {
        release_object( manager->current_call );
        manager->current_call = NULL;
    }

    while (manager->kernel_objects.root)
    {
        kernel_object = WINE_RB_ENTRY_VALUE( manager->kernel_objects.root, struct kernel_object, rb_entry );
        wine_rb_remove( &manager->kernel_objects, &kernel_object->rb_entry );
        list_remove( &kernel_object->list_entry );
        if (kernel_object->owned) release_object( kernel_object->object );
        free( kernel_object );
    }

    while ((ptr = list_head( &manager->devices )))
    {
        struct device *device = LIST_ENTRY( ptr, struct device, entry );
        delete_device( device );
    }

    while ((ptr = list_head( &manager->requests )))
    {
        struct irp_call *irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );
        list_remove( &irp->mgr_entry );
        assert( !irp->file && !irp->async );
        release_object( irp );
    }
}

static struct device_manager *create_device_manager(void)
{
    struct device_manager *manager;

    if ((manager = alloc_object( &device_manager_ops )))
    {
        manager->current_call = NULL;
        list_init( &manager->devices );
        list_init( &manager->requests );
        wine_rb_init( &manager->kernel_objects, compare_kernel_object );
    }
    return manager;
}
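
/* Queue an IRP_CALL_FREE for every client-side pointer still attached to an
 * object going away, so each driver can drop its copy, then discard the
 * mapping entries on the server side. */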
void free_kernel_objects( struct object *obj )
{
    struct list *ptr, *list;

    if (!(list = obj->ops->get_kernel_obj_list( obj ))) return;

    while ((ptr = list_head( list )))
    {
        struct kernel_object *kernel_object = LIST_ENTRY( ptr, struct kernel_object, list_entry );
        struct irp_call *irp;
        irp_params_t params;

        assert( !kernel_object->owned );

        memset( &params, 0, sizeof(params) );
        params.free.type = IRP_CALL_FREE;
        params.free.obj  = kernel_object->user_ptr;

        if ((irp = create_irp( NULL, &params, NULL )))
        {
            add_irp_to_queue( kernel_object->manager, irp, NULL );
            release_object( irp );
        }

        list_remove( &kernel_object->list_entry );
        wine_rb_remove( &kernel_object->manager->kernel_objects, &kernel_object->rb_entry );
        free( kernel_object );
    }
}


/* create a device manager */
DECL_HANDLER(create_device_manager)
{
    struct device_manager *manager = create_device_manager();

    if (manager)
    {
        reply->handle = alloc_handle( current->process, manager, req->access, req->attributes );
        release_object( manager );
    }
}


/* create a device */
DECL_HANDLER(create_device)
{
    struct device *device;
    struct unicode_str name = get_req_unicode_str();
    struct device_manager *manager;
    struct object *root = NULL;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if (req->rootdir && !(root = get_directory_obj( current->process, req->rootdir )))
    {
        release_object( manager );
        return;
    }

    if ((device = create_device( root, &name, manager )))
    {
        struct kernel_object *ptr = set_kernel_object( manager, &device->obj, req->user_ptr );
        if (ptr)
            grab_kernel_object( ptr );
        else
            set_error( STATUS_NO_MEMORY );
        release_object( device );
    }

    if (root) release_object( root );
    release_object( manager );
}


/* delete a device */
DECL_HANDLER(delete_device)
{
    struct device_manager *manager;
    struct kernel_object *ref;
    struct device *device;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((ref = kernel_object_from_ptr( manager, req->device )) && ref->object->ops == &device_ops)
    {
        device = (struct device *)grab_object( ref->object );
        delete_device( device );
        release_object( device );
    }
    else set_error( STATUS_INVALID_HANDLE );

    release_object( manager );
}
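
/* This request does double duty: it first consumes the result of the previous
 * call (manager->current_call), updating the async state or completing the
 * irp, and then dequeues the next pending irp for the client to dispatch. */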
/* retrieve the next pending device irp request */
DECL_HANDLER(get_next_device_request)
{
    struct irp_call *irp;
    struct device_manager *manager;
    struct list *ptr;
    struct iosb *iosb;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    /* process result of previous call */
    if (manager->current_call)
    {
        irp = manager->current_call;
        irp->user_ptr = req->user_ptr;

        if (irp->async)
        {
            if (req->pending)
                set_async_pending( irp->async );
            async_set_initial_status( irp->async, req->status );

            if (req->prev)
            {
                set_irp_result( irp, req->iosb_status, get_req_data(), get_req_data_size(), req->result );
            }
            else
            {
                async_wake_obj( irp->async );
                if (irp->canceled)
                {
                    /* if it was canceled during dispatch, we couldn't queue cancel
                     * call without client pointer, so we need to do it now */
                    cancel_irp_call( irp );
                }
            }
        }
        else
        {
            set_irp_result( irp, req->status, NULL, 0, 0 );
        }

        if (req->prev)
            close_handle( current->process, req->prev );  /* avoid an extra round-trip for close */

        free_irp_params( irp );
        release_object( irp );
        manager->current_call = NULL;
    }

    clear_error();

    if ((ptr = list_head( &manager->requests )))
    {
        struct thread *thread;

        irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );

        thread = irp->thread ? irp->thread : current;
        reply->client_thread = get_kernel_object_ptr( manager, &thread->obj );
        reply->client_tid    = get_thread_id( thread );

        iosb = irp->iosb;
        if (iosb)
            reply->in_size = iosb->in_size;

        if (iosb && iosb->in_size > get_reply_max_size())
            set_error( STATUS_BUFFER_OVERFLOW );
        else if (!irp->file || (reply->next = alloc_handle( current->process, irp, 0, 0 )))
        {
            if (fill_irp_params( manager, irp, &reply->params ))
            {
                if (iosb)
                {
                    set_reply_data_ptr( iosb->in_data, iosb->in_size );
                    iosb->in_data = NULL;
                    iosb->in_size = 0;
                }
                list_remove( &irp->mgr_entry );
                list_init( &irp->mgr_entry );
                /* we already own the object if it's only on manager queue */
                if (irp->file) grab_object( irp );
                manager->current_call = irp;
            }
            else close_handle( current->process, reply->next );
        }
    }
    else set_error( STATUS_PENDING );

    release_object( manager );
}


/* store results of an async irp */
DECL_HANDLER(set_irp_result)
{
    struct irp_call *irp;

    if ((irp = (struct irp_call *)get_handle_obj( current->process, req->handle, 0, &irp_call_ops )))
    {
        set_irp_result( irp, req->status, get_req_data(), get_req_data_size(), req->size );
        close_handle( current->process, req->handle );  /* avoid an extra round-trip for close */
        release_object( irp );
    }
}


/* get kernel pointer from server object */
DECL_HANDLER(get_kernel_object_ptr)
{
    struct device_manager *manager;
    struct object *object = NULL;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((object = get_handle_obj( current->process, req->handle, 0, NULL )))
    {
        reply->user_ptr = get_kernel_object_ptr( manager, object );
        release_object( object );
    }

    release_object( manager );
}


/* associate kernel pointer with server object */
DECL_HANDLER(set_kernel_object_ptr)
{
    struct device_manager *manager;
    struct object *object = NULL;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if (!(object = get_handle_obj( current->process, req->handle, 0, NULL )))
    {
        release_object( manager );
        return;
    }

    if (!set_kernel_object( manager, object, req->user_ptr ))
        set_error( STATUS_INVALID_HANDLE );

    release_object( object );
    release_object( manager );
}


/* grab server object reference from kernel object pointer */
DECL_HANDLER(grab_kernel_object)
{
    struct device_manager *manager;
    struct kernel_object *ref;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((ref = kernel_object_from_ptr( manager, req->user_ptr )) && !ref->owned)
        grab_kernel_object( ref );
    else
        set_error( STATUS_INVALID_HANDLE );

    release_object( manager );
}


/* release server object reference from kernel object pointer */
DECL_HANDLER(release_kernel_object)
{
    struct device_manager *manager;
    struct kernel_object *ref;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((ref = kernel_object_from_ptr( manager, req->user_ptr )) && ref->owned)
    {
        ref->owned = 0;
        release_object( ref->object );
    }
    else set_error( STATUS_INVALID_HANDLE );

    release_object( manager );
}


/* get handle from kernel object pointer */
DECL_HANDLER(get_kernel_object_handle)
{
    struct device_manager *manager;
    struct kernel_object *ref;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((ref = kernel_object_from_ptr( manager, req->user_ptr )))
        reply->handle = alloc_handle( current->process, ref->object, req->access, 0 );
    else
        set_error( STATUS_INVALID_HANDLE );

    release_object( manager );
}