msvcr120: Add [_]strtoimax[_l] and [_]strtoumax[_l].
[wine.git] / server / device.c
blob01e08f295f7cdfc6eb8c6462ead70a4e11764107
1 /*
2 * Server-side device support
4 * Copyright (C) 2007 Alexandre Julliard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
21 #include "config.h"
22 #include "wine/port.h"
23 #include "wine/rbtree.h"
25 #include <assert.h>
26 #include <fcntl.h>
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <stdarg.h>
31 #include "ntstatus.h"
32 #define WIN32_NO_STATUS
33 #include "windef.h"
34 #include "winternl.h"
35 #include "ddk/wdm.h"
37 #include "object.h"
38 #include "file.h"
39 #include "handle.h"
40 #include "request.h"
41 #include "process.h"
43 /* IRP object */
/* IRP object: one queued I/O request packet; waitable, and signaled once
 * completed (see irp_call_signaled: file is cleared on completion) */

struct irp_call
{
    struct object          obj;           /* object header */
    struct list            dev_entry;     /* entry in device queue */
    struct list            mgr_entry;     /* entry in manager queue */
    struct device_file    *file;          /* file containing this irp */
    struct thread         *thread;        /* thread that queued the irp */
    struct async          *async;         /* pending async op */
    irp_params_t           params;        /* irp parameters */
    struct iosb           *iosb;          /* I/O status block */
    int                    canceled;      /* the call was canceled */
    client_ptr_t           user_ptr;      /* client side pointer */
};

static void irp_call_dump( struct object *obj, int verbose );
static int irp_call_signaled( struct object *obj, struct wait_queue_entry *entry );
static void irp_call_destroy( struct object *obj );

static const struct object_ops irp_call_ops =
{
    sizeof(struct irp_call),          /* size */
    irp_call_dump,                    /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    irp_call_signaled,                /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    no_map_access,                    /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    no_kernel_obj_list,               /* get_kernel_obj_list */
    no_close_handle,                  /* close_handle */
    irp_call_destroy                  /* destroy */
};
/* device manager (a list of devices managed by the same client process);
 * waitable, and signaled while pending requests are queued */

struct device_manager
{
    struct object          obj;            /* object header */
    struct list            devices;        /* list of devices */
    struct list            requests;       /* list of pending irps across all devices */
    struct irp_call       *current_call;   /* call currently executed on client side */
    struct wine_rb_tree    kernel_objects; /* map of objects that have client side pointer associated */
};

static void device_manager_dump( struct object *obj, int verbose );
static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry );
static void device_manager_destroy( struct object *obj );

static const struct object_ops device_manager_ops =
{
    sizeof(struct device_manager),    /* size */
    device_manager_dump,              /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    device_manager_signaled,          /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    no_map_access,                    /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    no_kernel_obj_list,               /* get_kernel_obj_list */
    no_close_handle,                  /* close_handle */
    device_manager_destroy            /* destroy */
};
/* device (a single device object); namespace-linked so it can be opened by name */

struct device
{
    struct object          obj;             /* object header */
    struct device_manager *manager;         /* manager for this device (or NULL if deleted) */
    char                  *unix_path;       /* path to unix device if any */
    struct list            kernel_object;   /* list of kernel object pointers */
    struct list            entry;           /* entry in device manager list */
    struct list            files;           /* list of open files */
};

static void device_dump( struct object *obj, int verbose );
static struct object_type *device_get_type( struct object *obj );
static void device_destroy( struct object *obj );
static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options );
static struct list *device_get_kernel_obj_list( struct object *obj );

static const struct object_ops device_ops =
{
    sizeof(struct device),            /* size */
    device_dump,                      /* dump */
    device_get_type,                  /* get_type */
    no_add_queue,                     /* add_queue */
    NULL,                             /* remove_queue */
    NULL,                             /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    default_fd_map_access,            /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    directory_link_name,              /* link_name */
    default_unlink_name,              /* unlink_name */
    device_open_file,                 /* open_file */
    device_get_kernel_obj_list,       /* get_kernel_obj_list */
    no_close_handle,                  /* close_handle */
    device_destroy                    /* destroy */
};
/* device file (an open file handle to a device); I/O on it is turned into
 * IRPs queued to the device manager via the fd_ops below */

struct device_file
{
    struct object          obj;           /* object header */
    struct device         *device;        /* device for this file */
    struct fd             *fd;            /* file descriptor for irp */
    struct list            kernel_object; /* list of kernel object pointers */
    int                    closed;        /* closed file flag */
    struct list            entry;         /* entry in device list */
    struct list            requests;      /* list of pending irp requests */
};

static void device_file_dump( struct object *obj, int verbose );
static struct fd *device_file_get_fd( struct object *obj );
static struct list *device_file_get_kernel_obj_list( struct object *obj );
static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
static void device_file_destroy( struct object *obj );
static enum server_fd_type device_file_get_fd_type( struct fd *fd );
static int device_file_read( struct fd *fd, struct async *async, file_pos_t pos );
static int device_file_write( struct fd *fd, struct async *async, file_pos_t pos );
static int device_file_flush( struct fd *fd, struct async *async );
static int device_file_ioctl( struct fd *fd, ioctl_code_t code, struct async *async );
static void device_file_reselect_async( struct fd *fd, struct async_queue *queue );

static const struct object_ops device_file_ops =
{
    sizeof(struct device_file),       /* size */
    device_file_dump,                 /* dump */
    file_get_type,                    /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    default_fd_signaled,              /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    device_file_get_fd,               /* get_fd */
    default_fd_map_access,            /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    device_file_get_kernel_obj_list,  /* get_kernel_obj_list */
    device_file_close_handle,         /* close_handle */
    device_file_destroy               /* destroy */
};

static const struct fd_ops device_file_fd_ops =
{
    default_fd_get_poll_events,       /* get_poll_events */
    default_poll_event,               /* poll_event */
    device_file_get_fd_type,          /* get_fd_type */
    device_file_read,                 /* read */
    device_file_write,                /* write */
    device_file_flush,                /* flush */
    default_fd_get_file_info,         /* get_file_info */
    no_fd_get_volume_info,            /* get_volume_info */
    device_file_ioctl,                /* ioctl */
    default_fd_queue_async,           /* queue_async */
    device_file_reselect_async        /* reselect_async */
};
/* default get_kernel_obj_list for object types that cannot have a
 * client-side kernel pointer associated with them */
struct list *no_kernel_obj_list( struct object *obj )
{
    return NULL;
}
/* association between a server object and a client-side kernel pointer,
 * scoped to one device manager; kept both in a per-object list and in the
 * manager's rbtree keyed by user_ptr */
struct kernel_object
{
    struct device_manager *manager;   /* manager this mapping belongs to */
    client_ptr_t           user_ptr;  /* client-side kernel object pointer */
    struct object         *object;    /* server object */
    int                    owned;     /* do we hold a reference on 'object'? */
    struct list            list_entry;/* entry in the object's kernel_obj_list */
    struct wine_rb_entry   rb_entry;  /* entry in manager->kernel_objects */
};
/* rbtree comparison callback: key is a pointer to a client_ptr_t.
 * memcmp gives a byte-order-dependent but consistent total order,
 * which is all the rbtree needs for lookups. */
static int compare_kernel_object( const void *k, const struct wine_rb_entry *entry )
{
    struct kernel_object *ptr = WINE_RB_ENTRY_VALUE( entry, struct kernel_object, rb_entry );
    return memcmp( k, &ptr->user_ptr, sizeof(client_ptr_t) );
}
/* find the kernel_object mapping of a server object for the given manager,
 * or NULL if the object has no list or no mapping for this manager */
static struct kernel_object *kernel_object_from_obj( struct device_manager *manager, struct object *obj )
{
    struct kernel_object *kernel_object;
    struct list *list;

    if (!(list = obj->ops->get_kernel_obj_list( obj ))) return NULL;
    LIST_FOR_EACH_ENTRY( kernel_object, list, struct kernel_object, list_entry )
    {
        if (kernel_object->manager != manager) continue;
        return kernel_object;
    }
    return NULL;
}
268 static client_ptr_t get_kernel_object_ptr( struct device_manager *manager, struct object *obj )
270 struct kernel_object *kernel_object = kernel_object_from_obj( manager, obj );
271 return kernel_object ? kernel_object->user_ptr : 0;
/* associate a client-side kernel pointer with a server object for this manager;
 * returns the new mapping (not owned yet), or NULL on failure (object has no
 * kernel object list, out of memory, or user_ptr already registered) */
static struct kernel_object *set_kernel_object( struct device_manager *manager, struct object *obj, client_ptr_t user_ptr )
{
    struct kernel_object *kernel_object;
    struct list *list;

    if (!(list = obj->ops->get_kernel_obj_list( obj ))) return NULL;

    if (!(kernel_object = malloc( sizeof(*kernel_object) ))) return NULL;
    kernel_object->manager = manager;
    kernel_object->user_ptr = user_ptr;
    kernel_object->object = obj;
    kernel_object->owned = 0;

    if (wine_rb_put( &manager->kernel_objects, &user_ptr, &kernel_object->rb_entry ))
    {
        /* kernel_object pointer already set */
        free( kernel_object );
        return NULL;
    }

    list_add_head( list, &kernel_object->list_entry );
    return kernel_object;
}
298 static struct kernel_object *kernel_object_from_ptr( struct device_manager *manager, client_ptr_t client_ptr )
300 struct wine_rb_entry *entry = wine_rb_get( &manager->kernel_objects, &client_ptr );
301 return entry ? WINE_RB_ENTRY_VALUE( entry, struct kernel_object, rb_entry ) : NULL;
/* make the mapping own a reference on its server object; idempotent */
static void grab_kernel_object( struct kernel_object *ptr )
{
    if (!ptr->owned)
    {
        grab_object( ptr->object );
        ptr->owned = 1;
    }
}
313 static void irp_call_dump( struct object *obj, int verbose )
315 struct irp_call *irp = (struct irp_call *)obj;
316 fprintf( stderr, "IRP call file=%p\n", irp->file );
/* an irp becomes signaled once it has completed */
static int irp_call_signaled( struct object *obj, struct wait_queue_entry *entry )
{
    struct irp_call *irp = (struct irp_call *)obj;

    return !irp->file;  /* file is cleared once the irp has completed */
}
/* destroy callback: cancel any still-pending async and drop the references
 * the irp holds on its async, iosb, file and thread */
static void irp_call_destroy( struct object *obj )
{
    struct irp_call *irp = (struct irp_call *)obj;

    if (irp->async)
    {
        async_terminate( irp->async, STATUS_CANCELLED );
        release_object( irp->async );
    }
    if (irp->iosb) release_object( irp->iosb );
    if (irp->file) release_object( irp->file );
    if (irp->thread) release_object( irp->thread );
}
/* allocate a new irp for a request on the given file (file may be NULL for
 * manager-level calls such as cancel/free); returns NULL and sets the error
 * on failure.  If no async iosb is available an empty one is created. */
static struct irp_call *create_irp( struct device_file *file, const irp_params_t *params, struct async *async )
{
    struct irp_call *irp;

    if (file && !file->device->manager)  /* it has been deleted */
    {
        set_error( STATUS_FILE_DELETED );
        return NULL;
    }

    if ((irp = alloc_object( &irp_call_ops )))
    {
        irp->file     = file ? (struct device_file *)grab_object( file ) : NULL;
        irp->thread   = NULL;
        irp->async    = NULL;
        irp->params   = *params;
        irp->iosb     = NULL;
        irp->canceled = 0;
        irp->user_ptr = 0;

        if (async) irp->iosb = async_get_iosb( async );
        if (!irp->iosb && !(irp->iosb = create_iosb( NULL, 0, 0 )))
        {
            release_object( irp );
            irp = NULL;
        }
    }
    return irp;
}
/* complete an irp: store status/result/output data into the iosb, unlink it
 * from the device queue, terminate the pending async if any, and wake up
 * waiters.  Harmless no-op if the irp has already finished. */
static void set_irp_result( struct irp_call *irp, unsigned int status,
                            const void *out_data, data_size_t out_size, data_size_t result )
{
    struct device_file *file = irp->file;
    struct iosb *iosb = irp->iosb;

    if (!file) return;  /* already finished */

    /* FIXME: handle the STATUS_PENDING case */
    iosb->status = status;
    iosb->result = result;
    iosb->out_size = min( iosb->out_size, out_size );
    /* on memdup failure we simply report no output data */
    if (iosb->out_size && !(iosb->out_data = memdup( out_data, iosb->out_size )))
        iosb->out_size = 0;

    /* remove it from the device queue */
    list_remove( &irp->dev_entry );
    irp->file = NULL;
    if (irp->async)
    {
        /* STATUS_ALERTED tells the async there is partial data to fetch */
        if (result) status = STATUS_ALERTED;
        async_terminate( irp->async, status );
        release_object( irp->async );
        irp->async = NULL;
    }
    wake_up( &irp->obj, 0 );

    release_object( irp );  /* no longer on the device queue */
    release_object( file );
}
/* object dump callback for device */
static void device_dump( struct object *obj, int verbose )
{
    fprintf( stderr, "Device\n" );
}
/* return the NT object type ("Device") for this object */
static struct object_type *device_get_type( struct object *obj )
{
    static const WCHAR name[] = {'D','e','v','i','c','e'};
    static const struct unicode_str str = { name, sizeof(name) };
    return get_object_type( &str );
}
/* destroy callback: all files must be gone by now; unlink from the manager
 * list unless the device was already deleted */
static void device_destroy( struct object *obj )
{
    struct device *device = (struct device *)obj;

    assert( list_empty( &device->files ));

    free( device->unix_path );
    if (device->manager) list_remove( &device->entry );
}
/* queue an irp on its file (if any) and on the manager; takes a reference on
 * the irp for the queue, and wakes the manager when the queue was empty */
static void add_irp_to_queue( struct device_manager *manager, struct irp_call *irp, struct thread *thread )
{
    grab_object( irp );  /* grab reference for queued irp */
    irp->thread = thread ? (struct thread *)grab_object( thread ) : NULL;
    if (irp->file) list_add_tail( &irp->file->requests, &irp->dev_entry );
    list_add_tail( &manager->requests, &irp->mgr_entry );
    if (list_head( &manager->requests ) == &irp->mgr_entry) wake_up( &manager->obj, 0 );  /* first one */
}
/* open_file callback: create a device_file for the device.  If the device has
 * a unix path, open the real unix device; otherwise allocate a pseudo fd.
 * When a device manager exists, an IRP_CALL_CREATE is queued to the client. */
static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options )
{
    struct device *device = (struct device *)obj;
    struct device_file *file;

    if (!(file = alloc_object( &device_file_ops ))) return NULL;

    file->device = (struct device *)grab_object( device );
    file->closed = 0;
    list_init( &file->kernel_object );
    list_init( &file->requests );
    list_add_tail( &device->files, &file->entry );
    if (device->unix_path)
    {
        mode_t mode = 0666;
        access = file->obj.ops->map_access( &file->obj, access );
        file->fd = open_fd( NULL, device->unix_path, O_NONBLOCK | O_LARGEFILE,
                            &mode, access, sharing, options );
        if (file->fd) set_fd_user( file->fd, &device_file_fd_ops, &file->obj );
    }
    else file->fd = alloc_pseudo_fd( &device_file_fd_ops, &file->obj, options );

    if (!file->fd)
    {
        release_object( file );
        return NULL;
    }

    allow_fd_caching( file->fd );

    if (device->manager)
    {
        struct irp_call *irp;
        irp_params_t params;

        memset( &params, 0, sizeof(params) );
        params.create.type    = IRP_CALL_CREATE;
        params.create.access  = access;
        params.create.sharing = sharing;
        params.create.options = options;
        params.create.device  = get_kernel_object_ptr( device->manager, &device->obj );

        if ((irp = create_irp( file, &params, NULL )))
        {
            add_irp_to_queue( device->manager, irp, current );
            release_object( irp );
        }
    }
    return &file->obj;
}
485 static struct list *device_get_kernel_obj_list( struct object *obj )
487 struct device *device = (struct device *)obj;
488 return &device->kernel_object;
491 static void device_file_dump( struct object *obj, int verbose )
493 struct device_file *file = (struct device_file *)obj;
495 fprintf( stderr, "File on device %p\n", file->device );
498 static struct fd *device_file_get_fd( struct object *obj )
500 struct device_file *file = (struct device_file *)obj;
502 return (struct fd *)grab_object( file->fd );
505 static struct list *device_file_get_kernel_obj_list( struct object *obj )
507 struct device_file *file = (struct device_file *)obj;
508 return &file->kernel_object;
/* close_handle callback: when the last handle is closed, queue an
 * IRP_CALL_CLOSE to the device manager (once only, via the closed flag) */
static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
{
    struct device_file *file = (struct device_file *)obj;

    if (!file->closed && file->device->manager && obj->handle_count == 1)  /* last handle */
    {
        struct irp_call *irp;
        irp_params_t params;

        file->closed = 1;
        memset( &params, 0, sizeof(params) );
        params.close.type = IRP_CALL_CLOSE;

        if ((irp = create_irp( file, &params, NULL )))
        {
            add_irp_to_queue( file->device->manager, irp, current );
            release_object( irp );
        }
    }
    return 1;
}
/* destroy callback: drop the queue references of any still-pending irps,
 * release the fd, and unlink from the device */
static void device_file_destroy( struct object *obj )
{
    struct device_file *file = (struct device_file *)obj;
    struct irp_call *irp, *next;

    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        list_remove( &irp->dev_entry );
        release_object( irp );  /* no longer on the device queue */
    }
    if (file->fd) release_object( file->fd );
    list_remove( &file->entry );
    release_object( file->device );
}
/* fill the reply params for an irp about to be dispatched to the client:
 * translate the server file into a client handle (create) or kernel pointer
 * (other calls); returns 0 on failure (e.g. handle allocation failed) */
static int fill_irp_params( struct device_manager *manager, struct irp_call *irp, irp_params_t *params )
{
    switch (irp->params.type)
    {
    case IRP_CALL_NONE:
    case IRP_CALL_FREE:
    case IRP_CALL_CANCEL:
        break;
    case IRP_CALL_CREATE:
        /* the client needs a real handle for the create request */
        irp->params.create.file = alloc_handle( current->process, irp->file,
                                                irp->params.create.access, 0 );
        if (!irp->params.create.file) return 0;
        break;
    case IRP_CALL_CLOSE:
        irp->params.close.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_READ:
        irp->params.read.file     = get_kernel_object_ptr( manager, &irp->file->obj );
        irp->params.read.out_size = irp->iosb->out_size;
        break;
    case IRP_CALL_WRITE:
        irp->params.write.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_FLUSH:
        irp->params.flush.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_IOCTL:
        irp->params.ioctl.file     = get_kernel_object_ptr( manager, &irp->file->obj );
        irp->params.ioctl.out_size = irp->iosb->out_size;
        break;
    }

    *params = irp->params;
    return 1;
}
/* undo fill_irp_params: close the handle allocated for a create request */
static void free_irp_params( struct irp_call *irp )
{
    switch (irp->params.type)
    {
    case IRP_CALL_CREATE:
        close_handle( current->process, irp->params.create.file );
        break;
    default:
        break;
    }
}
/* queue an irp to the device; always returns 0 with STATUS_PENDING set on
 * success, since completion is reported asynchronously through the async */
static int queue_irp( struct device_file *file, const irp_params_t *params, struct async *async )
{
    struct irp_call *irp = create_irp( file, params, async );
    if (!irp) return 0;

    fd_queue_async( file->fd, async, ASYNC_TYPE_WAIT );
    irp->async = (struct async *)grab_object( async );
    add_irp_to_queue( file->device->manager, irp, current );
    release_object( irp );
    set_error( STATUS_PENDING );
    return 0;
}
/* fd type callback: device files are always FD_TYPE_DEVICE */
static enum server_fd_type device_file_get_fd_type( struct fd *fd )
{
    return FD_TYPE_DEVICE;
}
/* read callback: turn the read into an IRP_CALL_READ queued to the manager */
static int device_file_read( struct fd *fd, struct async *async, file_pos_t pos )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.read.type = IRP_CALL_READ;
    params.read.key  = 0;
    params.read.pos  = pos;
    return queue_irp( file, &params, async );
}
/* write callback: turn the write into an IRP_CALL_WRITE queued to the manager */
static int device_file_write( struct fd *fd, struct async *async, file_pos_t pos )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.write.type = IRP_CALL_WRITE;
    params.write.key  = 0;
    params.write.pos  = pos;
    return queue_irp( file, &params, async );
}
/* flush callback: turn the flush into an IRP_CALL_FLUSH queued to the manager */
static int device_file_flush( struct fd *fd, struct async *async )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.flush.type = IRP_CALL_FLUSH;
    return queue_irp( file, &params, async );
}
/* ioctl callback: turn the ioctl into an IRP_CALL_IOCTL queued to the manager */
static int device_file_ioctl( struct fd *fd, ioctl_code_t code, struct async *async )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.ioctl.type = IRP_CALL_IOCTL;
    params.ioctl.code = code;
    return queue_irp( file, &params, async );
}
/* cancel an irp: if the client already knows about it (user_ptr set) queue an
 * IRP_CALL_CANCEL to the manager, then complete the irp with STATUS_CANCELLED */
static void cancel_irp_call( struct irp_call *irp )
{
    struct irp_call *cancel_irp;
    irp_params_t params;

    irp->canceled = 1;
    /* nothing to tell the client if it never saw the irp or the device is gone */
    if (!irp->user_ptr || !irp->file || !irp->file->device->manager) return;

    memset( &params, 0, sizeof(params) );
    params.cancel.type = IRP_CALL_CANCEL;
    params.cancel.irp  = irp->user_ptr;

    if ((cancel_irp = create_irp( NULL, &params, NULL )))
    {
        add_irp_to_queue( irp->file->device->manager, cancel_irp, NULL );
        release_object( cancel_irp );
    }

    set_irp_result( irp, STATUS_CANCELLED, NULL, 0, 0 );
}
/* reselect_async callback: cancel the first request whose iosb is no longer
 * pending (i.e. whose async was aborted on the client side) */
static void device_file_reselect_async( struct fd *fd, struct async_queue *queue )
{
    struct device_file *file = get_fd_user( fd );
    struct irp_call *irp;

    LIST_FOR_EACH_ENTRY( irp, &file->requests, struct irp_call, dev_entry )
        if (irp->iosb->status != STATUS_PENDING)
        {
            cancel_irp_call( irp );
            return;
        }
}
/* create a device managed by the given device manager; the extra grab_object
 * is the reference held by the manager's device list */
static struct device *create_device( struct object *root, const struct unicode_str *name,
                                     struct device_manager *manager )
{
    struct device *device;

    if ((device = create_named_object( root, &device_ops, name, 0, NULL )))
    {
        device->unix_path = NULL;
        device->manager = manager;
        grab_object( device );
        list_add_tail( &manager->devices, &device->entry );
        list_init( &device->kernel_object );
        list_init( &device->files );
    }
    return device;
}
/* create a device backed directly by a unix device node; it has no manager,
 * so I/O goes straight to the unix fd instead of being queued as irps.
 * NOTE(review): strdup's return is not checked here — on OOM unix_path stays
 * NULL and the device behaves like a pseudo device; confirm this is intended. */
struct object *create_unix_device( struct object *root, const struct unicode_str *name,
                                   const char *unix_path )
{
    struct device *device;

    if ((device = create_named_object( root, &device_ops, name, 0, NULL )))
    {
        device->unix_path = strdup( unix_path );
        device->manager = NULL;  /* no manager, requests go straight to the Unix device */
        list_init( &device->kernel_object );
        list_init( &device->files );
    }
    return &device->obj;
}
/* terminate requests when the underlying device is deleted */
static void delete_file( struct device_file *file )
{
    struct irp_call *irp, *next;

    /* the pending requests may be the only thing holding a reference to the file */
    grab_object( file );

    /* terminate all pending requests */
    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        /* unlink from the manager queue before completing */
        list_remove( &irp->mgr_entry );
        set_irp_result( irp, STATUS_FILE_DELETED, NULL, 0, 0 );
    }

    release_object( file );
}
/* delete a device: terminate all of its open files, unlink it from the
 * namespace and the manager, and drop the manager's reference */
static void delete_device( struct device *device )
{
    struct device_file *file, *next;

    if (!device->manager) return;  /* already deleted */

    LIST_FOR_EACH_ENTRY_SAFE( file, next, &device->files, struct device_file, entry )
        delete_file( file );

    unlink_named_object( &device->obj );
    list_remove( &device->entry );
    device->manager = NULL;
    release_object( device );
}
/* object dump callback for device_manager */
static void device_manager_dump( struct object *obj, int verbose )
{
    fputs( "Device manager\n", stderr );
}
766 static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry )
768 struct device_manager *manager = (struct device_manager *)obj;
770 return !list_empty( &manager->requests );
/* destroy callback: drop the current call, free all kernel object mappings
 * (releasing owned references), delete all devices, and release the remaining
 * queued irps (which by then must have neither file nor async) */
static void device_manager_destroy( struct object *obj )
{
    struct device_manager *manager = (struct device_manager *)obj;
    struct kernel_object *kernel_object;
    struct list *ptr;

    if (manager->current_call)
    {
        release_object( manager->current_call );
        manager->current_call = NULL;
    }

    while (manager->kernel_objects.root)
    {
        kernel_object = WINE_RB_ENTRY_VALUE( manager->kernel_objects.root, struct kernel_object, rb_entry );
        wine_rb_remove( &manager->kernel_objects, &kernel_object->rb_entry );
        list_remove( &kernel_object->list_entry );
        if (kernel_object->owned) release_object( kernel_object->object );
        free( kernel_object );
    }

    while ((ptr = list_head( &manager->devices )))
    {
        struct device *device = LIST_ENTRY( ptr, struct device, entry );
        delete_device( device );
    }

    while ((ptr = list_head( &manager->requests )))
    {
        struct irp_call *irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );
        list_remove( &irp->mgr_entry );
        assert( !irp->file && !irp->async );
        release_object( irp );
    }
}
/* allocate and initialize a device manager object; NULL on allocation failure */
static struct device_manager *create_device_manager(void)
{
    struct device_manager *manager;

    if ((manager = alloc_object( &device_manager_ops )))
    {
        manager->current_call = NULL;
        list_init( &manager->devices );
        list_init( &manager->requests );
        wine_rb_init( &manager->kernel_objects, compare_kernel_object );
    }
    return manager;
}
/* remove all kernel object mappings of a server object that is going away,
 * notifying each client with an IRP_CALL_FREE; the mappings must not own a
 * reference at this point (the object is being destroyed) */
void free_kernel_objects( struct object *obj )
{
    struct list *ptr, *list;

    if (!(list = obj->ops->get_kernel_obj_list( obj ))) return;

    while ((ptr = list_head( list )))
    {
        struct kernel_object *kernel_object = LIST_ENTRY( ptr, struct kernel_object, list_entry );
        struct irp_call *irp;
        irp_params_t params;

        assert( !kernel_object->owned );

        memset( &params, 0, sizeof(params) );
        params.free.type = IRP_CALL_FREE;
        params.free.obj  = kernel_object->user_ptr;

        if ((irp = create_irp( NULL, &params, NULL )))
        {
            add_irp_to_queue( kernel_object->manager, irp, NULL );
            release_object( irp );
        }

        list_remove( &kernel_object->list_entry );
        wine_rb_remove( &kernel_object->manager->kernel_objects, &kernel_object->rb_entry );
        free( kernel_object );
    }
}
/* create a device manager */
DECL_HANDLER(create_device_manager)
{
    struct device_manager *manager = create_device_manager();

    if (manager)
    {
        reply->handle = alloc_handle( current->process, manager, req->access, req->attributes );
        release_object( manager );
    }
}
/* create a device; also registers the client-supplied kernel pointer for it
 * and makes the mapping own a reference, so the device outlives its handles */
DECL_HANDLER(create_device)
{
    struct device *device;
    struct unicode_str name = get_req_unicode_str();
    struct device_manager *manager;
    struct object *root = NULL;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if (req->rootdir && !(root = get_directory_obj( current->process, req->rootdir )))
    {
        release_object( manager );
        return;
    }

    if ((device = create_device( root, &name, manager )))
    {
        struct kernel_object *ptr = set_kernel_object( manager, &device->obj, req->user_ptr );
        if (ptr)
            grab_kernel_object( ptr );
        else
            set_error( STATUS_NO_MEMORY );
        release_object( device );
    }

    if (root) release_object( root );
    release_object( manager );
}
/* delete a device identified by its client-side kernel pointer; fails with
 * STATUS_INVALID_HANDLE if the pointer is unknown or not a device */
DECL_HANDLER(delete_device)
{
    struct device_manager *manager;
    struct kernel_object *ref;
    struct device *device;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((ref = kernel_object_from_ptr( manager, req->device )) && ref->object->ops == &device_ops)
    {
        /* grab a reference: delete_device drops the manager's reference */
        device = (struct device *)grab_object( ref->object );
        delete_device( device );
        release_object( device );
    }
    else set_error( STATUS_INVALID_HANDLE );

    release_object( manager );
}
/* retrieve the next pending device irp request; also finalizes the result of
 * the previous call (the irp the client just finished dispatching) */
DECL_HANDLER(get_next_device_request)
{
    struct irp_call *irp;
    struct device_manager *manager;
    struct list *ptr;
    struct iosb *iosb;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if (req->prev) close_handle( current->process, req->prev );  /* avoid an extra round-trip for close */

    /* process result of previous call */
    if (manager->current_call)
    {
        irp = manager->current_call;
        irp->user_ptr = req->user_ptr;

        if (req->status)
            set_irp_result( irp, req->status, NULL, 0, 0 );
        if (irp->canceled)
            /* if it was canceled during dispatch, we couldn't queue cancel call without client pointer,
             * so we need to do it now */
            cancel_irp_call( irp );
        else if (irp->async)
            set_async_pending( irp->async, irp->file && is_fd_overlapped( irp->file->fd ) );

        free_irp_params( irp );
        release_object( irp );
        manager->current_call = NULL;
    }

    clear_error();

    if ((ptr = list_head( &manager->requests )))
    {
        struct thread *thread;

        irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );

        /* pick the queueing thread if known, otherwise the current one */
        thread = irp->thread ? irp->thread : current;
        reply->client_thread = get_kernel_object_ptr( manager, &thread->obj );
        reply->client_tid = get_thread_id( thread );

        iosb = irp->iosb;
        reply->in_size = iosb->in_size;
        if (iosb->in_size > get_reply_max_size()) set_error( STATUS_BUFFER_OVERFLOW );
        else if (!irp->file || (reply->next = alloc_handle( current->process, irp, 0, 0 )))
        {
            if (fill_irp_params( manager, irp, &reply->params ))
            {
                /* hand the input data over to the reply and detach it from the iosb */
                set_reply_data_ptr( iosb->in_data, iosb->in_size );
                iosb->in_data = NULL;
                iosb->in_size = 0;
                list_remove( &irp->mgr_entry );
                list_init( &irp->mgr_entry );
                /* we already own the object if it's only on manager queue */
                if (irp->file) grab_object( irp );
                manager->current_call = irp;
            }
            else close_handle( current->process, reply->next );
        }
    }
    else set_error( STATUS_PENDING );

    release_object( manager );
}
/* store results of an async irp */
DECL_HANDLER(set_irp_result)
{
    struct irp_call *irp;

    if ((irp = (struct irp_call *)get_handle_obj( current->process, req->handle, 0, &irp_call_ops )))
    {
        if (!irp->canceled)
            set_irp_result( irp, req->status, get_req_data(), get_req_data_size(), req->size );
        else if(irp->user_ptr) /* cancel already queued */
            set_error( STATUS_MORE_PROCESSING_REQUIRED );
        else /* we may be still dispatching the IRP. don't bother queuing cancel if it's already complete */
            irp->canceled = 0;
        close_handle( current->process, req->handle );  /* avoid an extra round-trip for close */
        release_object( irp );
    }
}
/* get kernel pointer from server object; replies 0 if no mapping exists */
DECL_HANDLER(get_kernel_object_ptr)
{
    struct device_manager *manager;
    struct object *object = NULL;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((object = get_handle_obj( current->process, req->handle, 0, NULL )))
    {
        reply->user_ptr = get_kernel_object_ptr( manager, object );
        release_object( object );
    }

    release_object( manager );
}
/* associate kernel pointer with server object; STATUS_INVALID_HANDLE when the
 * object cannot carry kernel mappings or the pointer is already registered */
DECL_HANDLER(set_kernel_object_ptr)
{
    struct device_manager *manager;
    struct object *object = NULL;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if (!(object = get_handle_obj( current->process, req->handle, 0, NULL )))
    {
        release_object( manager );
        return;
    }

    if (!set_kernel_object( manager, object, req->user_ptr ))
        set_error( STATUS_INVALID_HANDLE );

    release_object( object );
    release_object( manager );
}
/* grab server object reference from kernel object pointer; fails if the
 * pointer is unknown or the reference is already owned */
DECL_HANDLER(grab_kernel_object)
{
    struct device_manager *manager;
    struct kernel_object *ref;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((ref = kernel_object_from_ptr( manager, req->user_ptr )) && !ref->owned)
        grab_kernel_object( ref );
    else
        set_error( STATUS_INVALID_HANDLE );

    release_object( manager );
}
/* release server object reference from kernel object pointer; fails if the
 * pointer is unknown or no reference is currently owned */
DECL_HANDLER(release_kernel_object)
{
    struct device_manager *manager;
    struct kernel_object *ref;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((ref = kernel_object_from_ptr( manager, req->user_ptr )) && ref->owned)
    {
        ref->owned = 0;
        release_object( ref->object );
    }
    else set_error( STATUS_INVALID_HANDLE );

    release_object( manager );
}
/* get handle from kernel object pointer; allocates a new handle in the
 * calling process with the requested access */
DECL_HANDLER(get_kernel_object_handle)
{
    struct device_manager *manager;
    struct kernel_object *ref;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((ref = kernel_object_from_ptr( manager, req->user_ptr )))
        reply->handle = alloc_handle( current->process, ref->object, req->access, 0 );
    else
        set_error( STATUS_INVALID_HANDLE );

    release_object( manager );
}