/*
 * Server-side device support
 *
 * Copyright (C) 2007 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"
#include "winternl.h"
#include "ddk/wdm.h"

#include "object.h"
#include "file.h"
#include "handle.h"
#include "request.h"
#include "process.h"

/* IRP object */

struct irp_call
{
    struct object          obj;           /* object header */
    struct list            dev_entry;     /* entry in device queue */
    struct list            mgr_entry;     /* entry in manager queue */
    struct device_file    *file;          /* file containing this irp */
    struct thread         *thread;        /* thread that queued the irp */
    client_ptr_t           user_arg;      /* user arg used to identify the request */
    struct async          *async;         /* pending async op */
    unsigned int           status;        /* resulting status (or STATUS_PENDING) */
    irp_params_t           params;        /* irp parameters */
    data_size_t            result;        /* size of result (input or output depending on the type) */
    data_size_t            in_size;       /* size of input data */
    void                  *in_data;       /* input data */
    data_size_t            out_size;      /* size of output data */
    void                  *out_data;      /* output data */
};

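/* An irp_call is queued both on its device file and on the device manager;
 * the manager process retrieves it with get_next_device_request and completes
 * it through set_irp_result, which wakes up the client waiting on the result. */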
static void irp_call_dump( struct object *obj, int verbose );
static int irp_call_signaled( struct object *obj, struct wait_queue_entry *entry );
static void irp_call_destroy( struct object *obj );

static const struct object_ops irp_call_ops =
{
    sizeof(struct irp_call),          /* size */
    irp_call_dump,                    /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    irp_call_signaled,                /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    no_map_access,                    /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_open_file,                     /* open_file */
    no_close_handle,                  /* close_handle */
    irp_call_destroy                  /* destroy */
};

/* device manager (a list of devices managed by the same client process) */

struct device_manager
{
    struct object          obj;           /* object header */
    struct list            devices;       /* list of devices */
    struct list            requests;      /* list of pending irps across all devices */
};

static void device_manager_dump( struct object *obj, int verbose );
static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry );
static void device_manager_destroy( struct object *obj );

static const struct object_ops device_manager_ops =
{
    sizeof(struct device_manager),    /* size */
    device_manager_dump,              /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    device_manager_signaled,          /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    no_map_access,                    /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_open_file,                     /* open_file */
    no_close_handle,                  /* close_handle */
    device_manager_destroy            /* destroy */
};

/* device (a single device object) */

struct device
{
    struct object          obj;           /* object header */
    struct device_manager *manager;       /* manager for this device (or NULL if deleted) */
    char                  *unix_path;     /* path to unix device if any */
    client_ptr_t           user_ptr;      /* opaque ptr for client side */
    struct list            entry;         /* entry in device manager list */
    struct list            files;         /* list of open files */
};

static void device_dump( struct object *obj, int verbose );
static struct object_type *device_get_type( struct object *obj );
static void device_destroy( struct object *obj );
static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options );

static const struct object_ops device_ops =
{
    sizeof(struct device),            /* size */
    device_dump,                      /* dump */
    device_get_type,                  /* get_type */
    no_add_queue,                     /* add_queue */
    NULL,                             /* remove_queue */
    NULL,                             /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    default_fd_map_access,            /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    device_open_file,                 /* open_file */
    no_close_handle,                  /* close_handle */
    device_destroy                    /* destroy */
};

/* device file (an open file handle to a device) */

struct device_file
{
    struct object          obj;           /* object header */
    struct device         *device;        /* device for this file */
    struct fd             *fd;            /* file descriptor for irp */
    struct list            entry;         /* entry in device list */
    struct list            requests;      /* list of pending irp requests */
};

static void device_file_dump( struct object *obj, int verbose );
static struct fd *device_file_get_fd( struct object *obj );
static void device_file_destroy( struct object *obj );
static enum server_fd_type device_file_get_fd_type( struct fd *fd );
static obj_handle_t device_file_read( struct fd *fd, const async_data_t *async_data, int blocking,
                                      file_pos_t pos );
static obj_handle_t device_file_write( struct fd *fd, const async_data_t *async_data, int blocking,
                                       file_pos_t pos, data_size_t *written );
static obj_handle_t device_file_flush( struct fd *fd, const async_data_t *async_data, int blocking );
static obj_handle_t device_file_ioctl( struct fd *fd, ioctl_code_t code, const async_data_t *async_data,
                                       int blocking );

static const struct object_ops device_file_ops =
{
    sizeof(struct device_file),       /* size */
    device_file_dump,                 /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    default_fd_signaled,              /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    device_file_get_fd,               /* get_fd */
    default_fd_map_access,            /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_open_file,                     /* open_file */
    no_close_handle,                  /* close_handle */
    device_file_destroy               /* destroy */
};

static const struct fd_ops device_file_fd_ops =
{
    default_fd_get_poll_events,       /* get_poll_events */
    default_poll_event,               /* poll_event */
    device_file_get_fd_type,          /* get_fd_type */
    device_file_read,                 /* read */
    device_file_write,                /* write */
    device_file_flush,                /* flush */
    device_file_ioctl,                /* ioctl */
    default_fd_queue_async,           /* queue_async */
    default_fd_reselect_async,        /* reselect_async */
    default_fd_cancel_async           /* cancel_async */
};

static void irp_call_dump( struct object *obj, int verbose )
{
    struct irp_call *irp = (struct irp_call *)obj;
    fprintf( stderr, "IRP call file=%p\n", irp->file );
}

static int irp_call_signaled( struct object *obj, struct wait_queue_entry *entry )
{
    struct irp_call *irp = (struct irp_call *)obj;

    return !irp->file;  /* file is cleared once the irp has completed */
}

static void irp_call_destroy( struct object *obj )
{
    struct irp_call *irp = (struct irp_call *)obj;

    free( irp->in_data );
    free( irp->out_data );
    if (irp->async)
    {
        async_terminate( irp->async, STATUS_CANCELLED );
        release_object( irp->async );
    }
    if (irp->file) release_object( irp->file );
    if (irp->thread) release_object( irp->thread );  /* thread is only set once the irp has been queued */
}

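/* allocate an irp for a request on the given device file, copying the input data */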
static struct irp_call *create_irp( struct device_file *file, const irp_params_t *params,
                                    const void *in_data, data_size_t in_size, data_size_t out_size )
{
    struct irp_call *irp;

    if (!file->device->manager)  /* it has been deleted */
    {
        set_error( STATUS_FILE_DELETED );
        return NULL;
    }

    if ((irp = alloc_object( &irp_call_ops )))
    {
        irp->file     = (struct device_file *)grab_object( file );
        irp->thread   = NULL;  /* set when the irp gets queued */
        irp->async    = NULL;
        irp->params   = *params;
        irp->status   = STATUS_PENDING;
        irp->result   = 0;
        irp->in_size  = in_size;
        irp->in_data  = NULL;
        irp->out_size = out_size;
        irp->out_data = NULL;

        if (irp->in_size && !(irp->in_data = memdup( in_data, in_size )))
        {
            release_object( irp );
            irp = NULL;
        }
    }
    return irp;
}

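/* store the result of an irp, wake up the waiting client and terminate any pending async */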
static void set_irp_result( struct irp_call *irp, unsigned int status,
                            const void *out_data, data_size_t out_size, data_size_t result )
{
    struct device_file *file = irp->file;

    if (!file) return;  /* already finished */

    /* FIXME: handle the STATUS_PENDING case */
    irp->status = status;
    irp->result = result;
    irp->out_size = min( irp->out_size, out_size );
    if (irp->out_size && !(irp->out_data = memdup( out_data, irp->out_size )))
        irp->out_size = 0;
    release_object( file );
    irp->file = NULL;
    if (irp->async)
    {
        if (result) status = STATUS_ALERTED;
        async_terminate( irp->async, status );
        release_object( irp->async );
        irp->async = NULL;
    }
    wake_up( &irp->obj, 0 );

    if (status != STATUS_ALERTED)
    {
        /* remove it from the device queue */
        /* (for STATUS_ALERTED this will be done in get_irp_result) */
        list_remove( &irp->dev_entry );
        release_object( irp );  /* no longer on the device queue */
    }
}

static void device_dump( struct object *obj, int verbose )
{
    struct device *device = (struct device *)obj;

    fprintf( stderr, "Device " );
    dump_object_name( &device->obj );
    fputc( '\n', stderr );
}

static struct object_type *device_get_type( struct object *obj )
{
    static const WCHAR name[] = {'D','e','v','i','c','e'};
    static const struct unicode_str str = { name, sizeof(name) };
    return get_object_type( &str );
}

static void device_destroy( struct object *obj )
{
    struct device *device = (struct device *)obj;

    assert( list_empty( &device->files ));

    free( device->unix_path );
    if (device->manager) list_remove( &device->entry );
}

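/* open a file on the device, backed by the Unix device when there is one, or by a pseudo fd otherwise */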
static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options )
{
    struct device *device = (struct device *)obj;
    struct device_file *file;

    if ((file = alloc_object( &device_file_ops )))
    {
        file->device = (struct device *)grab_object( device );
        list_init( &file->requests );
        list_add_tail( &device->files, &file->entry );
        if (device->unix_path)
        {
            mode_t mode = 0666;
            access = file->obj.ops->map_access( &file->obj, access );
            file->fd = open_fd( NULL, device->unix_path, O_NONBLOCK | O_LARGEFILE,
                                &mode, access, sharing, options );
            if (file->fd) set_fd_user( file->fd, &device_file_fd_ops, &file->obj );
        }
        else file->fd = alloc_pseudo_fd( &device_file_fd_ops, &file->obj, 0 );

        if (!file->fd)
        {
            release_object( file );
            file = NULL;
        }
    }
    return &file->obj;
}

static void device_file_dump( struct object *obj, int verbose )
{
    struct device_file *file = (struct device_file *)obj;

    fprintf( stderr, "File on device %p\n", file->device );
}

static struct fd *device_file_get_fd( struct object *obj )
{
    struct device_file *file = (struct device_file *)obj;

    return (struct fd *)grab_object( file->fd );
}

static void device_file_destroy( struct object *obj )
{
    struct device_file *file = (struct device_file *)obj;
    struct irp_call *irp, *next;

    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        list_remove( &irp->dev_entry );
        release_object( irp );  /* no longer on the device queue */
    }
    if (file->fd) release_object( file->fd );
    list_remove( &file->entry );
    release_object( file->device );
}

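/* find a queued irp from the thread and user argument that identify the original request */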
static struct irp_call *find_irp_call( struct device_file *file, struct thread *thread,
                                       client_ptr_t user_arg )
{
    struct irp_call *irp;

    LIST_FOR_EACH_ENTRY( irp, &file->requests, struct irp_call, dev_entry )
        if (irp->thread == thread && irp->user_arg == user_arg) return irp;

    set_error( STATUS_INVALID_PARAMETER );
    return NULL;
}

/* queue an irp to the device */
static obj_handle_t queue_irp( struct device_file *file, struct irp_call *irp,
                               const async_data_t *async_data, int blocking )
{
    obj_handle_t handle = 0;
    struct device_manager *manager = file->device->manager;

    assert( manager );

    if (blocking && !(handle = alloc_handle( current->process, irp, SYNCHRONIZE, 0 ))) return 0;

    if (!(irp->async = fd_queue_async( file->fd, async_data, ASYNC_TYPE_WAIT )))
    {
        if (handle) close_handle( current->process, handle );
        return 0;
    }
    irp->thread = (struct thread *)grab_object( current );
    irp->user_arg = async_data->arg;
    grab_object( irp );  /* grab reference for queued irp */

    list_add_tail( &file->requests, &irp->dev_entry );
    list_add_tail( &manager->requests, &irp->mgr_entry );
    if (list_head( &manager->requests ) == &irp->mgr_entry) wake_up( &manager->obj, 0 );  /* first one */
    set_error( STATUS_PENDING );
    return handle;
}

static enum server_fd_type device_file_get_fd_type( struct fd *fd )
{
    return FD_TYPE_DEVICE;
}

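/* the read, write, flush and ioctl callbacks below all work the same way:
 * they build an irp and queue it to the device manager */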
static obj_handle_t device_file_read( struct fd *fd, const async_data_t *async_data, int blocking,
                                      file_pos_t pos )
{
    struct device_file *file = get_fd_user( fd );
    struct irp_call *irp;
    obj_handle_t handle;
    irp_params_t params;

    params.major = IRP_MJ_READ;
    params.read.key = 0;
    params.read.pos = pos;

    irp = create_irp( file, &params, NULL, 0, get_reply_max_size() );
    if (!irp) return 0;

    handle = queue_irp( file, irp, async_data, blocking );
    release_object( irp );
    return handle;
}

static obj_handle_t device_file_write( struct fd *fd, const async_data_t *async_data, int blocking,
                                       file_pos_t pos, data_size_t *written )
{
    struct device_file *file = get_fd_user( fd );
    struct irp_call *irp;
    obj_handle_t handle;
    irp_params_t params;

    params.major = IRP_MJ_WRITE;
    params.write.key = 0;
    params.write.pos = pos;

    irp = create_irp( file, &params, get_req_data(), get_req_data_size(), 0 );
    if (!irp) return 0;

    handle = queue_irp( file, irp, async_data, blocking );
    release_object( irp );
    return handle;
}

static obj_handle_t device_file_flush( struct fd *fd, const async_data_t *async_data, int blocking )
{
    struct device_file *file = get_fd_user( fd );
    struct irp_call *irp;
    obj_handle_t handle;
    irp_params_t params;

    params.major = IRP_MJ_FLUSH_BUFFERS;

    irp = create_irp( file, &params, NULL, 0, 0 );
    if (!irp) return 0;

    handle = queue_irp( file, irp, async_data, blocking );
    release_object( irp );
    return handle;
}

static obj_handle_t device_file_ioctl( struct fd *fd, ioctl_code_t code, const async_data_t *async_data,
                                       int blocking )
{
    struct device_file *file = get_fd_user( fd );
    struct irp_call *irp;
    obj_handle_t handle;
    irp_params_t params;

    params.major = IRP_MJ_DEVICE_CONTROL;
    params.ioctl.code = code;

    irp = create_irp( file, &params, get_req_data(), get_req_data_size(),
                      get_reply_max_size() );
    if (!irp) return 0;

    handle = queue_irp( file, irp, async_data, blocking );
    release_object( irp );
    return handle;
}

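/* create a device object in the given directory, attached to its device manager */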
static struct device *create_device( struct directory *root, const struct unicode_str *name,
                                     struct device_manager *manager, unsigned int attr )
{
    struct device *device;

    if ((device = create_named_object_dir( root, name, attr, &device_ops )))
    {
        if (get_error() != STATUS_OBJECT_NAME_EXISTS)
        {
            /* initialize it if it didn't already exist */
            device->unix_path = NULL;
            device->manager = manager;
            list_add_tail( &manager->devices, &device->entry );
            list_init( &device->files );
        }
    }
    return device;
}

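/* create a static device whose requests are sent directly to the underlying Unix device */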
struct device *create_unix_device( struct directory *root, const struct unicode_str *name,
                                   const char *unix_path )
{
    struct device *device;

    if ((device = create_named_object_dir( root, name, 0, &device_ops )))
    {
        device->unix_path = strdup( unix_path );
        device->manager = NULL;  /* no manager, requests go straight to the Unix device */
        list_init( &device->files );
        make_object_static( &device->obj );
    }
    return device;
}

/* terminate requests when the underlying device is deleted */
static void delete_file( struct device_file *file )
{
    struct irp_call *irp, *next;

    /* terminate all pending requests */
    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        list_remove( &irp->mgr_entry );
        set_irp_result( irp, STATUS_FILE_DELETED, NULL, 0, 0 );
    }
}

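/* unlink a device and terminate the requests pending on its open files */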
static void delete_device( struct device *device )
{
    struct device_file *file, *next;

    if (!device->manager) return;  /* already deleted */

    LIST_FOR_EACH_ENTRY_SAFE( file, next, &device->files, struct device_file, entry )
        delete_file( file );

    unlink_named_object( &device->obj );
    list_remove( &device->entry );
    device->manager = NULL;
}

static void device_manager_dump( struct object *obj, int verbose )
{
    fprintf( stderr, "Device manager\n" );
}

static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry )
{
    struct device_manager *manager = (struct device_manager *)obj;

    return !list_empty( &manager->requests );
}

static void device_manager_destroy( struct object *obj )
{
    struct device_manager *manager = (struct device_manager *)obj;
    struct list *ptr;

    while ((ptr = list_head( &manager->devices )))
    {
        struct device *device = LIST_ENTRY( ptr, struct device, entry );
        delete_device( device );
    }
}

static struct device_manager *create_device_manager(void)
{
    struct device_manager *manager;

    if ((manager = alloc_object( &device_manager_ops )))
    {
        list_init( &manager->devices );
        list_init( &manager->requests );
    }
    return manager;
}

/* create a device manager */
DECL_HANDLER(create_device_manager)
{
    struct device_manager *manager = create_device_manager();

    if (manager)
    {
        reply->handle = alloc_handle( current->process, manager, req->access, req->attributes );
        release_object( manager );
    }
}

/* create a device */
DECL_HANDLER(create_device)
{
    struct device *device;
    struct unicode_str name;
    struct device_manager *manager;
    struct directory *root = NULL;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    get_req_unicode_str( &name );
    if (req->rootdir && !(root = get_directory_obj( current->process, req->rootdir, 0 )))
    {
        release_object( manager );
        return;
    }

    if ((device = create_device( root, &name, manager, req->attributes )))
    {
        device->user_ptr = req->user_ptr;
        reply->handle = alloc_handle( current->process, device, req->access, req->attributes );
        release_object( device );
    }

    if (root) release_object( root );
    release_object( manager );
}

/* delete a device */
DECL_HANDLER(delete_device)
{
    struct device *device;

    if ((device = (struct device *)get_handle_obj( current->process, req->handle, 0, &device_ops )))
    {
        delete_device( device );
        release_object( device );
    }
}

/* retrieve the next pending device irp request */
DECL_HANDLER(get_next_device_request)
{
    struct irp_call *irp;
    struct device_manager *manager;
    struct list *ptr;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if (req->prev)
    {
        if ((irp = (struct irp_call *)get_handle_obj( current->process, req->prev, 0, &irp_call_ops )))
        {
            set_irp_result( irp, req->status, NULL, 0, 0 );
            close_handle( current->process, req->prev );  /* avoid an extra round-trip for close */
            release_object( irp );
        }
        clear_error();
    }

    if ((ptr = list_head( &manager->requests )))
    {
        irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );
        reply->params = irp->params;
        reply->user_ptr = irp->file->device->user_ptr;
        reply->client_pid = get_process_id( irp->thread->process );
        reply->client_tid = get_thread_id( irp->thread );
        reply->in_size = irp->in_size;
        reply->out_size = irp->out_size;
        if (irp->in_size > get_reply_max_size()) set_error( STATUS_BUFFER_OVERFLOW );
        else if ((reply->next = alloc_handle( current->process, irp, 0, 0 )))
        {
            set_reply_data_ptr( irp->in_data, irp->in_size );
            irp->in_data = NULL;
            irp->in_size = 0;
            list_remove( &irp->mgr_entry );
            list_init( &irp->mgr_entry );
        }
    }
    else set_error( STATUS_PENDING );

    release_object( manager );
}

/* store results of an async irp */
DECL_HANDLER(set_irp_result)
{
    struct irp_call *irp;
    struct device_manager *manager;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((irp = (struct irp_call *)get_handle_obj( current->process, req->handle, 0, &irp_call_ops )))
    {
        set_irp_result( irp, req->status, get_req_data(), get_req_data_size(), req->size );
        close_handle( current->process, req->handle );  /* avoid an extra round-trip for close */
        release_object( irp );
    }
    release_object( manager );
}

/* retrieve results of an async irp */
DECL_HANDLER(get_irp_result)
{
    struct device_file *file;
    struct irp_call *irp;

    if (!(file = (struct device_file *)get_handle_obj( current->process, req->handle,
                                                       0, &device_file_ops )))
        return;

    if ((irp = find_irp_call( file, current, req->user_arg )))
    {
        if (irp->out_data)
        {
            data_size_t size = min( irp->out_size, get_reply_max_size() );
            if (size)
            {
                set_reply_data_ptr( irp->out_data, size );
                irp->out_data = NULL;
            }
        }
        reply->size = irp->result;
        set_error( irp->status );
        list_remove( &irp->dev_entry );
        release_object( irp );  /* no longer on the device queue */
    }
    release_object( file );
}