/* server/device.c — Wine server-side device support */
/*
 * Server-side device support
 *
 * Copyright (C) 2007 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
21 #include "config.h"
22 #include "wine/port.h"
23 #include "wine/rbtree.h"
25 #include <assert.h>
26 #include <fcntl.h>
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <stdarg.h>
31 #include "ntstatus.h"
32 #define WIN32_NO_STATUS
33 #include "windef.h"
34 #include "winternl.h"
35 #include "ddk/wdm.h"
37 #include "object.h"
38 #include "file.h"
39 #include "handle.h"
40 #include "request.h"
41 #include "process.h"
43 /* IRP object */
45 struct irp_call
47 struct object obj; /* object header */
48 struct list dev_entry; /* entry in device queue */
49 struct list mgr_entry; /* entry in manager queue */
50 struct device_file *file; /* file containing this irp */
51 struct thread *thread; /* thread that queued the irp */
52 struct async *async; /* pending async op */
53 irp_params_t params; /* irp parameters */
54 struct iosb *iosb; /* I/O status block */
57 static void irp_call_dump( struct object *obj, int verbose );
58 static int irp_call_signaled( struct object *obj, struct wait_queue_entry *entry );
59 static void irp_call_destroy( struct object *obj );
61 static const struct object_ops irp_call_ops =
63 sizeof(struct irp_call), /* size */
64 irp_call_dump, /* dump */
65 no_get_type, /* get_type */
66 add_queue, /* add_queue */
67 remove_queue, /* remove_queue */
68 irp_call_signaled, /* signaled */
69 no_satisfied, /* satisfied */
70 no_signal, /* signal */
71 no_get_fd, /* get_fd */
72 no_map_access, /* map_access */
73 default_get_sd, /* get_sd */
74 default_set_sd, /* set_sd */
75 no_lookup_name, /* lookup_name */
76 no_link_name, /* link_name */
77 NULL, /* unlink_name */
78 no_open_file, /* open_file */
79 no_kernel_obj_list, /* get_kernel_obj_list */
80 no_close_handle, /* close_handle */
81 irp_call_destroy /* destroy */
85 /* device manager (a list of devices managed by the same client process) */
87 struct device_manager
89 struct object obj; /* object header */
90 struct list devices; /* list of devices */
91 struct list requests; /* list of pending irps across all devices */
92 struct wine_rb_tree kernel_objects; /* map of objects that have client side pointer associated */
95 static void device_manager_dump( struct object *obj, int verbose );
96 static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry );
97 static void device_manager_destroy( struct object *obj );
99 static const struct object_ops device_manager_ops =
101 sizeof(struct device_manager), /* size */
102 device_manager_dump, /* dump */
103 no_get_type, /* get_type */
104 add_queue, /* add_queue */
105 remove_queue, /* remove_queue */
106 device_manager_signaled, /* signaled */
107 no_satisfied, /* satisfied */
108 no_signal, /* signal */
109 no_get_fd, /* get_fd */
110 no_map_access, /* map_access */
111 default_get_sd, /* get_sd */
112 default_set_sd, /* set_sd */
113 no_lookup_name, /* lookup_name */
114 no_link_name, /* link_name */
115 NULL, /* unlink_name */
116 no_open_file, /* open_file */
117 no_kernel_obj_list, /* get_kernel_obj_list */
118 no_close_handle, /* close_handle */
119 device_manager_destroy /* destroy */
123 /* device (a single device object) */
125 struct device
127 struct object obj; /* object header */
128 struct device_manager *manager; /* manager for this device (or NULL if deleted) */
129 char *unix_path; /* path to unix device if any */
130 client_ptr_t user_ptr; /* opaque ptr for client side */
131 struct list entry; /* entry in device manager list */
132 struct list files; /* list of open files */
135 static void device_dump( struct object *obj, int verbose );
136 static struct object_type *device_get_type( struct object *obj );
137 static void device_destroy( struct object *obj );
138 static struct object *device_open_file( struct object *obj, unsigned int access,
139 unsigned int sharing, unsigned int options );
141 static const struct object_ops device_ops =
143 sizeof(struct device), /* size */
144 device_dump, /* dump */
145 device_get_type, /* get_type */
146 no_add_queue, /* add_queue */
147 NULL, /* remove_queue */
148 NULL, /* signaled */
149 no_satisfied, /* satisfied */
150 no_signal, /* signal */
151 no_get_fd, /* get_fd */
152 default_fd_map_access, /* map_access */
153 default_get_sd, /* get_sd */
154 default_set_sd, /* set_sd */
155 no_lookup_name, /* lookup_name */
156 directory_link_name, /* link_name */
157 default_unlink_name, /* unlink_name */
158 device_open_file, /* open_file */
159 no_kernel_obj_list, /* get_kernel_obj_list */
160 no_close_handle, /* close_handle */
161 device_destroy /* destroy */
165 /* device file (an open file handle to a device) */
167 struct device_file
169 struct object obj; /* object header */
170 struct device *device; /* device for this file */
171 struct fd *fd; /* file descriptor for irp */
172 client_ptr_t user_ptr; /* opaque ptr for client side */
173 struct list entry; /* entry in device list */
174 struct list requests; /* list of pending irp requests */
177 static void device_file_dump( struct object *obj, int verbose );
178 static struct fd *device_file_get_fd( struct object *obj );
179 static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
180 static void device_file_destroy( struct object *obj );
181 static enum server_fd_type device_file_get_fd_type( struct fd *fd );
182 static int device_file_read( struct fd *fd, struct async *async, file_pos_t pos );
183 static int device_file_write( struct fd *fd, struct async *async, file_pos_t pos );
184 static int device_file_flush( struct fd *fd, struct async *async );
185 static int device_file_ioctl( struct fd *fd, ioctl_code_t code, struct async *async );
187 static const struct object_ops device_file_ops =
189 sizeof(struct device_file), /* size */
190 device_file_dump, /* dump */
191 file_get_type, /* get_type */
192 add_queue, /* add_queue */
193 remove_queue, /* remove_queue */
194 default_fd_signaled, /* signaled */
195 no_satisfied, /* satisfied */
196 no_signal, /* signal */
197 device_file_get_fd, /* get_fd */
198 default_fd_map_access, /* map_access */
199 default_get_sd, /* get_sd */
200 default_set_sd, /* set_sd */
201 no_lookup_name, /* lookup_name */
202 no_link_name, /* link_name */
203 NULL, /* unlink_name */
204 no_open_file, /* open_file */
205 no_kernel_obj_list, /* get_kernel_obj_list */
206 device_file_close_handle, /* close_handle */
207 device_file_destroy /* destroy */
210 static const struct fd_ops device_file_fd_ops =
212 default_fd_get_poll_events, /* get_poll_events */
213 default_poll_event, /* poll_event */
214 device_file_get_fd_type, /* get_fd_type */
215 device_file_read, /* read */
216 device_file_write, /* write */
217 device_file_flush, /* flush */
218 default_fd_get_file_info, /* get_file_info */
219 no_fd_get_volume_info, /* get_volume_info */
220 device_file_ioctl, /* ioctl */
221 default_fd_queue_async, /* queue_async */
222 default_fd_reselect_async /* reselect_async */
226 struct list *no_kernel_obj_list( struct object *obj )
228 return NULL;
231 struct kernel_object
233 struct device_manager *manager;
234 client_ptr_t user_ptr;
235 struct object *object;
236 int owned;
237 struct list list_entry;
238 struct wine_rb_entry rb_entry;
241 static int compare_kernel_object( const void *k, const struct wine_rb_entry *entry )
243 struct kernel_object *ptr = WINE_RB_ENTRY_VALUE( entry, struct kernel_object, rb_entry );
244 return memcmp( k, &ptr->user_ptr, sizeof(client_ptr_t) );
247 static struct kernel_object *kernel_object_from_obj( struct device_manager *manager, struct object *obj )
249 struct kernel_object *kernel_object;
250 struct list *list;
252 if (!(list = obj->ops->get_kernel_obj_list( obj ))) return NULL;
253 LIST_FOR_EACH_ENTRY( kernel_object, list, struct kernel_object, list_entry )
255 if (kernel_object->manager != manager) continue;
256 return kernel_object;
258 return NULL;
261 static client_ptr_t get_kernel_object_ptr( struct device_manager *manager, struct object *obj )
263 struct kernel_object *kernel_object = kernel_object_from_obj( manager, obj );
264 return kernel_object ? kernel_object->user_ptr : 0;
267 static struct kernel_object *set_kernel_object( struct device_manager *manager, struct object *obj, client_ptr_t user_ptr )
269 struct kernel_object *kernel_object;
270 struct list *list;
272 if (!(list = obj->ops->get_kernel_obj_list( obj ))) return NULL;
274 if (!(kernel_object = malloc( sizeof(*kernel_object) ))) return NULL;
275 kernel_object->manager = manager;
276 kernel_object->user_ptr = user_ptr;
277 kernel_object->object = obj;
278 kernel_object->owned = 0;
280 if (wine_rb_put( &manager->kernel_objects, &user_ptr, &kernel_object->rb_entry ))
282 /* kernel_object pointer already set */
283 free( kernel_object );
284 return NULL;
287 list_add_head( list, &kernel_object->list_entry );
288 return kernel_object;
291 static struct kernel_object *kernel_object_from_ptr( struct device_manager *manager, client_ptr_t client_ptr )
293 struct wine_rb_entry *entry = wine_rb_get( &manager->kernel_objects, &client_ptr );
294 return entry ? WINE_RB_ENTRY_VALUE( entry, struct kernel_object, rb_entry ) : NULL;
297 static void grab_kernel_object( struct kernel_object *ptr )
299 if (!ptr->owned)
301 grab_object( ptr->object );
302 ptr->owned = 1;
306 static void irp_call_dump( struct object *obj, int verbose )
308 struct irp_call *irp = (struct irp_call *)obj;
309 fprintf( stderr, "IRP call file=%p\n", irp->file );
312 static int irp_call_signaled( struct object *obj, struct wait_queue_entry *entry )
314 struct irp_call *irp = (struct irp_call *)obj;
316 return !irp->file; /* file is cleared once the irp has completed */
319 static void irp_call_destroy( struct object *obj )
321 struct irp_call *irp = (struct irp_call *)obj;
323 if (irp->async)
325 async_terminate( irp->async, STATUS_CANCELLED );
326 release_object( irp->async );
328 if (irp->iosb) release_object( irp->iosb );
329 if (irp->file) release_object( irp->file );
330 if (irp->thread) release_object( irp->thread );
333 static struct irp_call *create_irp( struct device_file *file, const irp_params_t *params, struct async *async )
335 struct irp_call *irp;
337 if (file && !file->device->manager) /* it has been deleted */
339 set_error( STATUS_FILE_DELETED );
340 return NULL;
343 if ((irp = alloc_object( &irp_call_ops )))
345 irp->file = file ? (struct device_file *)grab_object( file ) : NULL;
346 irp->thread = NULL;
347 irp->async = NULL;
348 irp->params = *params;
349 irp->iosb = NULL;
351 if (async) irp->iosb = async_get_iosb( async );
352 if (!irp->iosb && !(irp->iosb = create_iosb( NULL, 0, 0 )))
354 release_object( irp );
355 irp = NULL;
358 return irp;
361 static void set_irp_result( struct irp_call *irp, unsigned int status,
362 const void *out_data, data_size_t out_size, data_size_t result )
364 struct device_file *file = irp->file;
365 struct iosb *iosb = irp->iosb;
367 if (!file) return; /* already finished */
369 /* FIXME: handle the STATUS_PENDING case */
370 iosb->status = status;
371 iosb->result = result;
372 iosb->out_size = min( iosb->out_size, out_size );
373 if (iosb->out_size && !(iosb->out_data = memdup( out_data, iosb->out_size )))
374 iosb->out_size = 0;
375 irp->file = NULL;
376 if (irp->async)
378 if (result) status = STATUS_ALERTED;
379 async_terminate( irp->async, status );
380 release_object( irp->async );
381 irp->async = NULL;
383 wake_up( &irp->obj, 0 );
385 /* remove it from the device queue */
386 list_remove( &irp->dev_entry );
387 release_object( irp ); /* no longer on the device queue */
388 release_object( file );
static void device_dump( struct object *obj, int verbose )
{
    fputs( "Device\n", stderr );
}
397 static struct object_type *device_get_type( struct object *obj )
399 static const WCHAR name[] = {'D','e','v','i','c','e'};
400 static const struct unicode_str str = { name, sizeof(name) };
401 return get_object_type( &str );
404 static void device_destroy( struct object *obj )
406 struct device *device = (struct device *)obj;
408 assert( list_empty( &device->files ));
410 free( device->unix_path );
411 if (device->manager) list_remove( &device->entry );
414 static void add_irp_to_queue( struct device_manager *manager, struct irp_call *irp, struct thread *thread )
416 grab_object( irp ); /* grab reference for queued irp */
417 irp->thread = thread ? (struct thread *)grab_object( thread ) : NULL;
418 if (irp->file) list_add_tail( &irp->file->requests, &irp->dev_entry );
419 list_add_tail( &manager->requests, &irp->mgr_entry );
420 if (list_head( &manager->requests ) == &irp->mgr_entry) wake_up( &manager->obj, 0 ); /* first one */
423 static struct object *device_open_file( struct object *obj, unsigned int access,
424 unsigned int sharing, unsigned int options )
426 struct device *device = (struct device *)obj;
427 struct device_file *file;
429 if (!(file = alloc_object( &device_file_ops ))) return NULL;
431 file->device = (struct device *)grab_object( device );
432 file->user_ptr = 0;
433 list_init( &file->requests );
434 list_add_tail( &device->files, &file->entry );
435 if (device->unix_path)
437 mode_t mode = 0666;
438 access = file->obj.ops->map_access( &file->obj, access );
439 file->fd = open_fd( NULL, device->unix_path, O_NONBLOCK | O_LARGEFILE,
440 &mode, access, sharing, options );
441 if (file->fd) set_fd_user( file->fd, &device_file_fd_ops, &file->obj );
443 else file->fd = alloc_pseudo_fd( &device_file_fd_ops, &file->obj, options );
445 if (!file->fd)
447 release_object( file );
448 return NULL;
451 allow_fd_caching( file->fd );
453 if (device->manager)
455 struct irp_call *irp;
456 irp_params_t params;
458 memset( &params, 0, sizeof(params) );
459 params.create.major = IRP_MJ_CREATE;
460 params.create.access = access;
461 params.create.sharing = sharing;
462 params.create.options = options;
463 params.create.device = file->device->user_ptr;
465 if ((irp = create_irp( file, &params, NULL )))
467 add_irp_to_queue( device->manager, irp, NULL );
468 release_object( irp );
471 return &file->obj;
474 static void device_file_dump( struct object *obj, int verbose )
476 struct device_file *file = (struct device_file *)obj;
478 fprintf( stderr, "File on device %p\n", file->device );
481 static struct fd *device_file_get_fd( struct object *obj )
483 struct device_file *file = (struct device_file *)obj;
485 return (struct fd *)grab_object( file->fd );
488 static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
490 struct device_file *file = (struct device_file *)obj;
492 if (file->device->manager && obj->handle_count == 1) /* last handle */
494 struct irp_call *irp;
495 irp_params_t params;
497 memset( &params, 0, sizeof(params) );
498 params.close.major = IRP_MJ_CLOSE;
499 params.close.file = file->user_ptr;
501 if ((irp = create_irp( file, &params, NULL )))
503 add_irp_to_queue( file->device->manager, irp, NULL );
504 release_object( irp );
507 return 1;
510 static void device_file_destroy( struct object *obj )
512 struct device_file *file = (struct device_file *)obj;
513 struct irp_call *irp, *next;
515 LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
517 list_remove( &irp->dev_entry );
518 release_object( irp ); /* no longer on the device queue */
520 if (file->fd) release_object( file->fd );
521 list_remove( &file->entry );
522 release_object( file->device );
525 static void set_file_user_ptr( struct device_file *file, client_ptr_t ptr )
527 struct irp_call *irp;
529 if (file->user_ptr == ptr) return; /* nothing to do */
531 file->user_ptr = ptr;
533 /* update already queued irps */
535 LIST_FOR_EACH_ENTRY( irp, &file->requests, struct irp_call, dev_entry )
537 switch (irp->params.major)
539 case IRP_MJ_CLOSE: irp->params.close.file = ptr; break;
540 case IRP_MJ_READ: irp->params.read.file = ptr; break;
541 case IRP_MJ_WRITE: irp->params.write.file = ptr; break;
542 case IRP_MJ_FLUSH_BUFFERS: irp->params.flush.file = ptr; break;
543 case IRP_MJ_DEVICE_CONTROL: irp->params.ioctl.file = ptr; break;
548 /* queue an irp to the device */
549 static int queue_irp( struct device_file *file, const irp_params_t *params, struct async *async )
551 struct irp_call *irp = create_irp( file, params, async );
552 if (!irp) return 0;
554 fd_queue_async( file->fd, async, ASYNC_TYPE_WAIT );
555 irp->async = (struct async *)grab_object( async );
556 add_irp_to_queue( file->device->manager, irp, current );
557 release_object( irp );
558 set_error( STATUS_PENDING );
559 return 1;
562 static enum server_fd_type device_file_get_fd_type( struct fd *fd )
564 return FD_TYPE_DEVICE;
567 static int device_file_read( struct fd *fd, struct async *async, file_pos_t pos )
569 struct device_file *file = get_fd_user( fd );
570 irp_params_t params;
572 memset( &params, 0, sizeof(params) );
573 params.read.major = IRP_MJ_READ;
574 params.read.key = 0;
575 params.read.pos = pos;
576 params.read.file = file->user_ptr;
577 return queue_irp( file, &params, async );
580 static int device_file_write( struct fd *fd, struct async *async, file_pos_t pos )
582 struct device_file *file = get_fd_user( fd );
583 irp_params_t params;
585 memset( &params, 0, sizeof(params) );
586 params.write.major = IRP_MJ_WRITE;
587 params.write.key = 0;
588 params.write.pos = pos;
589 params.write.file = file->user_ptr;
590 return queue_irp( file, &params, async );
593 static int device_file_flush( struct fd *fd, struct async *async )
595 struct device_file *file = get_fd_user( fd );
596 irp_params_t params;
598 memset( &params, 0, sizeof(params) );
599 params.flush.major = IRP_MJ_FLUSH_BUFFERS;
600 params.flush.file = file->user_ptr;
601 return queue_irp( file, &params, async );
604 static int device_file_ioctl( struct fd *fd, ioctl_code_t code, struct async *async )
606 struct device_file *file = get_fd_user( fd );
607 irp_params_t params;
609 memset( &params, 0, sizeof(params) );
610 params.ioctl.major = IRP_MJ_DEVICE_CONTROL;
611 params.ioctl.code = code;
612 params.ioctl.file = file->user_ptr;
613 return queue_irp( file, &params, async );
616 static struct device *create_device( struct object *root, const struct unicode_str *name,
617 struct device_manager *manager, unsigned int attr )
619 struct device *device;
621 if ((device = create_named_object( root, &device_ops, name, attr, NULL )))
623 if (get_error() != STATUS_OBJECT_NAME_EXISTS)
625 /* initialize it if it didn't already exist */
626 device->unix_path = NULL;
627 device->manager = manager;
628 list_add_tail( &manager->devices, &device->entry );
629 list_init( &device->files );
632 return device;
635 struct object *create_unix_device( struct object *root, const struct unicode_str *name,
636 const char *unix_path )
638 struct device *device;
640 if ((device = create_named_object( root, &device_ops, name, 0, NULL )))
642 device->unix_path = strdup( unix_path );
643 device->manager = NULL; /* no manager, requests go straight to the Unix device */
644 list_init( &device->files );
646 return &device->obj;
650 /* terminate requests when the underlying device is deleted */
651 static void delete_file( struct device_file *file )
653 struct irp_call *irp, *next;
655 /* terminate all pending requests */
656 LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
658 list_remove( &irp->mgr_entry );
659 set_irp_result( irp, STATUS_FILE_DELETED, NULL, 0, 0 );
663 static void delete_device( struct device *device )
665 struct device_file *file, *next;
667 if (!device->manager) return; /* already deleted */
669 LIST_FOR_EACH_ENTRY_SAFE( file, next, &device->files, struct device_file, entry )
670 delete_file( file );
672 unlink_named_object( &device->obj );
673 list_remove( &device->entry );
674 device->manager = NULL;
static void device_manager_dump( struct object *obj, int verbose )
{
    fprintf( stderr, "Device manager\n" );
}
683 static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry )
685 struct device_manager *manager = (struct device_manager *)obj;
687 return !list_empty( &manager->requests );
690 static void device_manager_destroy( struct object *obj )
692 struct device_manager *manager = (struct device_manager *)obj;
693 struct kernel_object *kernel_object;
694 struct list *ptr;
696 while (manager->kernel_objects.root)
698 kernel_object = WINE_RB_ENTRY_VALUE( manager->kernel_objects.root, struct kernel_object, rb_entry );
699 wine_rb_remove( &manager->kernel_objects, &kernel_object->rb_entry );
700 list_remove( &kernel_object->list_entry );
701 if (kernel_object->owned) release_object( kernel_object->object );
702 free( kernel_object );
705 while ((ptr = list_head( &manager->devices )))
707 struct device *device = LIST_ENTRY( ptr, struct device, entry );
708 delete_device( device );
711 while ((ptr = list_head( &manager->requests )))
713 struct irp_call *irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );
714 list_remove( &irp->mgr_entry );
715 assert( !irp->file && !irp->async );
716 release_object( irp );
720 static struct device_manager *create_device_manager(void)
722 struct device_manager *manager;
724 if ((manager = alloc_object( &device_manager_ops )))
726 list_init( &manager->devices );
727 list_init( &manager->requests );
728 wine_rb_init( &manager->kernel_objects, compare_kernel_object );
730 return manager;
733 void free_kernel_objects( struct object *obj )
735 struct list *ptr, *list;
737 if (!(list = obj->ops->get_kernel_obj_list( obj ))) return;
739 while ((ptr = list_head( list )))
741 struct kernel_object *kernel_object = LIST_ENTRY( ptr, struct kernel_object, list_entry );
742 struct irp_call *irp;
743 irp_params_t params;
745 assert( !kernel_object->owned );
747 /* abuse IRP_MJ_CLEANUP to request client to free no longer valid kernel object */
748 memset( &params, 0, sizeof(params) );
749 params.cleanup.major = IRP_MJ_CLEANUP;
750 params.cleanup.obj = kernel_object->user_ptr;
752 if ((irp = create_irp( NULL, &params, NULL )))
754 add_irp_to_queue( kernel_object->manager, irp, NULL );
755 release_object( irp );
758 list_remove( &kernel_object->list_entry );
759 wine_rb_remove( &kernel_object->manager->kernel_objects, &kernel_object->rb_entry );
760 free( kernel_object );
765 /* create a device manager */
766 DECL_HANDLER(create_device_manager)
768 struct device_manager *manager = create_device_manager();
770 if (manager)
772 reply->handle = alloc_handle( current->process, manager, req->access, req->attributes );
773 release_object( manager );
778 /* create a device */
779 DECL_HANDLER(create_device)
781 struct device *device;
782 struct unicode_str name = get_req_unicode_str();
783 struct device_manager *manager;
784 struct object *root = NULL;
786 if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
787 0, &device_manager_ops )))
788 return;
790 if (req->rootdir && !(root = get_directory_obj( current->process, req->rootdir )))
792 release_object( manager );
793 return;
796 if ((device = create_device( root, &name, manager, req->attributes )))
798 device->user_ptr = req->user_ptr;
799 reply->handle = alloc_handle( current->process, device, req->access, req->attributes );
800 release_object( device );
803 if (root) release_object( root );
804 release_object( manager );
808 /* delete a device */
809 DECL_HANDLER(delete_device)
811 struct device *device;
813 if ((device = (struct device *)get_handle_obj( current->process, req->handle, 0, &device_ops )))
815 delete_device( device );
816 release_object( device );
821 /* retrieve the next pending device irp request */
822 DECL_HANDLER(get_next_device_request)
824 struct irp_call *irp;
825 struct device_manager *manager;
826 struct list *ptr;
827 struct iosb *iosb;
829 reply->params.major = IRP_MJ_MAXIMUM_FUNCTION + 1;
831 if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
832 0, &device_manager_ops )))
833 return;
835 if (req->prev)
837 if ((irp = (struct irp_call *)get_handle_obj( current->process, req->prev, 0, &irp_call_ops )))
839 set_irp_result( irp, req->status, NULL, 0, 0 );
840 close_handle( current->process, req->prev ); /* avoid an extra round-trip for close */
841 release_object( irp );
843 clear_error();
846 if ((ptr = list_head( &manager->requests )))
848 irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );
849 if (irp->thread)
851 reply->client_pid = get_process_id( irp->thread->process );
852 reply->client_tid = get_thread_id( irp->thread );
854 reply->params = irp->params;
855 iosb = irp->iosb;
856 reply->in_size = iosb->in_size;
857 reply->out_size = iosb->out_size;
858 if (iosb->in_size > get_reply_max_size()) set_error( STATUS_BUFFER_OVERFLOW );
859 else if (!irp->file || (reply->next = alloc_handle( current->process, irp, 0, 0 )))
861 set_reply_data_ptr( iosb->in_data, iosb->in_size );
862 iosb->in_data = NULL;
863 iosb->in_size = 0;
864 list_remove( &irp->mgr_entry );
865 list_init( &irp->mgr_entry );
866 if (!irp->file) release_object( irp ); /* no longer on manager queue */
869 else set_error( STATUS_PENDING );
871 release_object( manager );
875 /* store results of an async irp */
876 DECL_HANDLER(set_irp_result)
878 struct irp_call *irp;
880 if ((irp = (struct irp_call *)get_handle_obj( current->process, req->handle, 0, &irp_call_ops )))
882 if (irp->file) set_file_user_ptr( irp->file, req->file_ptr );
883 set_irp_result( irp, req->status, get_req_data(), get_req_data_size(), req->size );
884 close_handle( current->process, req->handle ); /* avoid an extra round-trip for close */
885 release_object( irp );
890 /* get kernel pointer from server object */
891 DECL_HANDLER(get_kernel_object_ptr)
893 struct device_manager *manager;
894 struct object *object = NULL;
896 if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
897 0, &device_manager_ops )))
898 return;
900 if ((object = get_handle_obj( current->process, req->handle, 0, NULL )))
902 reply->user_ptr = get_kernel_object_ptr( manager, object );
903 release_object( object );
906 release_object( manager );
910 /* associate kernel pointer with server object */
911 DECL_HANDLER(set_kernel_object_ptr)
913 struct device_manager *manager;
914 struct object *object = NULL;
916 if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
917 0, &device_manager_ops )))
918 return;
920 if (!(object = get_handle_obj( current->process, req->handle, 0, NULL )))
922 release_object( manager );
923 return;
926 if (!set_kernel_object( manager, object, req->user_ptr ))
927 set_error( STATUS_INVALID_HANDLE );
929 release_object( object );
930 release_object( manager );
934 /* grab server object reference from kernel object pointer */
935 DECL_HANDLER(grab_kernel_object)
937 struct device_manager *manager;
938 struct kernel_object *ref;
940 if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
941 0, &device_manager_ops )))
942 return;
944 if ((ref = kernel_object_from_ptr( manager, req->user_ptr )) && !ref->owned)
945 grab_kernel_object( ref );
946 else
947 set_error( STATUS_INVALID_HANDLE );
949 release_object( manager );
953 /* release server object reference from kernel object pointer */
954 DECL_HANDLER(release_kernel_object)
956 struct device_manager *manager;
957 struct kernel_object *ref;
959 if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
960 0, &device_manager_ops )))
961 return;
963 if ((ref = kernel_object_from_ptr( manager, req->user_ptr )) && ref->owned)
965 ref->owned = 0;
966 release_object( ref->object );
968 else set_error( STATUS_INVALID_HANDLE );
970 release_object( manager );
974 /* get handle from kernel object pointer */
975 DECL_HANDLER(get_kernel_object_handle)
977 struct device_manager *manager;
978 struct kernel_object *ref;
980 if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
981 0, &device_manager_ops )))
982 return;
984 if ((ref = kernel_object_from_ptr( manager, req->user_ptr )))
985 reply->handle = alloc_handle( current->process, ref->object, req->access, 0 );
986 else
987 set_error( STATUS_INVALID_HANDLE );
989 release_object( manager );