ntoskrnl: Add support for read and write requests.
server/device.c

/*
 * Server-side device support
 *
 * Copyright (C) 2007 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"
#include "winternl.h"
#include "ddk/wdm.h"

#include "object.h"
#include "file.h"
#include "handle.h"
#include "request.h"
#include "process.h"

struct irp_call
{
    struct object          obj;           /* object header */
    struct list            dev_entry;     /* entry in device queue */
    struct list            mgr_entry;     /* entry in manager queue */
    struct device         *device;        /* device containing this irp */
    struct thread         *thread;        /* thread that queued the irp */
    client_ptr_t           user_arg;      /* user arg used to identify the request */
    struct async          *async;         /* pending async op */
    unsigned int           status;        /* resulting status (or STATUS_PENDING) */
    irp_params_t           params;        /* irp parameters */
    data_size_t            result;        /* size of result (input or output depending on the type) */
    data_size_t            in_size;       /* size of input data */
    void                  *in_data;       /* input data */
    data_size_t            out_size;      /* size of output data */
    void                  *out_data;      /* output data */
};

static void irp_call_dump( struct object *obj, int verbose );
static int irp_call_signaled( struct object *obj, struct wait_queue_entry *entry );
static void irp_call_destroy( struct object *obj );

static const struct object_ops irp_call_ops =
{
    sizeof(struct irp_call),          /* size */
    irp_call_dump,                    /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    irp_call_signaled,                /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    no_map_access,                    /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_open_file,                     /* open_file */
    no_close_handle,                  /* close_handle */
    irp_call_destroy                  /* destroy */
};

struct device_manager
{
    struct object          obj;           /* object header */
    struct list            devices;       /* list of devices */
    struct list            requests;      /* list of pending irps across all devices */
};

static void device_manager_dump( struct object *obj, int verbose );
static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry );
static void device_manager_destroy( struct object *obj );

static const struct object_ops device_manager_ops =
{
    sizeof(struct device_manager),    /* size */
    device_manager_dump,              /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    device_manager_signaled,          /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    no_map_access,                    /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_open_file,                     /* open_file */
    no_close_handle,                  /* close_handle */
    device_manager_destroy            /* destroy */
};

struct device
{
    struct object          obj;           /* object header */
    struct device_manager *manager;       /* manager for this device (or NULL if deleted) */
    struct fd             *fd;            /* file descriptor for irp */
    client_ptr_t           user_ptr;      /* opaque ptr for client side */
    struct list            entry;         /* entry in device manager list */
    struct list            requests;      /* list of pending irp requests */
};

static void device_dump( struct object *obj, int verbose );
static struct object_type *device_get_type( struct object *obj );
static struct fd *device_get_fd( struct object *obj );
static void device_destroy( struct object *obj );
static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options );
static enum server_fd_type device_get_fd_type( struct fd *fd );
static obj_handle_t device_read( struct fd *fd, const async_data_t *async_data, int blocking,
                                 file_pos_t pos );
static obj_handle_t device_write( struct fd *fd, const async_data_t *async_data, int blocking,
                                  file_pos_t pos, data_size_t *written );
static obj_handle_t device_flush( struct fd *fd, const async_data_t *async_data, int blocking );
static obj_handle_t device_ioctl( struct fd *fd, ioctl_code_t code, const async_data_t *async_data,
                                  int blocking );

static const struct object_ops device_ops =
{
    sizeof(struct device),            /* size */
    device_dump,                      /* dump */
    device_get_type,                  /* get_type */
    no_add_queue,                     /* add_queue */
    NULL,                             /* remove_queue */
    NULL,                             /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    device_get_fd,                    /* get_fd */
    default_fd_map_access,            /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    device_open_file,                 /* open_file */
    no_close_handle,                  /* close_handle */
    device_destroy                    /* destroy */
};

static const struct fd_ops device_fd_ops =
{
    default_fd_get_poll_events,       /* get_poll_events */
    default_poll_event,               /* poll_event */
    device_get_fd_type,               /* get_fd_type */
    device_read,                      /* read */
    device_write,                     /* write */
    device_flush,                     /* flush */
    device_ioctl,                     /* ioctl */
    default_fd_queue_async,           /* queue_async */
    default_fd_reselect_async,        /* reselect_async */
    default_fd_cancel_async           /* cancel_async */
};

static void irp_call_dump( struct object *obj, int verbose )
{
    struct irp_call *irp = (struct irp_call *)obj;
    fprintf( stderr, "IRP call device=%p\n", irp->device );
}

static int irp_call_signaled( struct object *obj, struct wait_queue_entry *entry )
{
    struct irp_call *irp = (struct irp_call *)obj;

    return !irp->device;  /* device is cleared once the irp has completed */
}

static void irp_call_destroy( struct object *obj )
{
    struct irp_call *irp = (struct irp_call *)obj;

    free( irp->in_data );
    free( irp->out_data );
    if (irp->async)
    {
        async_terminate( irp->async, STATUS_CANCELLED );
        release_object( irp->async );
    }
    if (irp->device) release_object( irp->device );
    if (irp->thread) release_object( irp->thread );  /* thread is only set once the irp has been queued */
}

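/* create an irp call for a request on the given device */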
static struct irp_call *create_irp( struct device *device, const irp_params_t *params,
                                    const void *in_data, data_size_t in_size, data_size_t out_size )
{
    struct irp_call *irp;

    if (!device->manager)  /* it has been deleted */
    {
        set_error( STATUS_FILE_DELETED );
        return NULL;
    }

    if ((irp = alloc_object( &irp_call_ops )))
    {
        irp->device   = (struct device *)grab_object( device );
        irp->thread   = NULL;  /* set when the irp gets queued */
        irp->async    = NULL;
        irp->params   = *params;
        irp->status   = STATUS_PENDING;
        irp->result   = 0;
        irp->in_size  = in_size;
        irp->in_data  = NULL;
        irp->out_size = out_size;
        irp->out_data = NULL;

        if (irp->in_size && !(irp->in_data = memdup( in_data, in_size )))
        {
            release_object( irp );
            irp = NULL;
        }
    }
    return irp;
}

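/* store the result of an irp and signal its completion */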
static void set_irp_result( struct irp_call *irp, unsigned int status,
                            const void *out_data, data_size_t out_size, data_size_t result )
{
    struct device *device = irp->device;

    if (!device) return;  /* already finished */

    /* FIXME: handle the STATUS_PENDING case */
    irp->status = status;
    irp->result = result;
    irp->out_size = min( irp->out_size, out_size );
    if (irp->out_size && !(irp->out_data = memdup( out_data, irp->out_size )))
        irp->out_size = 0;
    release_object( device );
    irp->device = NULL;
    if (irp->async)
    {
        if (result) status = STATUS_ALERTED;
        async_terminate( irp->async, status );
        release_object( irp->async );
        irp->async = NULL;
    }
    wake_up( &irp->obj, 0 );

    if (status != STATUS_ALERTED)
    {
        /* remove it from the device queue */
        /* (for STATUS_ALERTED this will be done in get_irp_result) */
        list_remove( &irp->dev_entry );
        release_object( irp );  /* no longer on the device queue */
    }
}

static void device_dump( struct object *obj, int verbose )
{
    struct device *device = (struct device *)obj;

    fprintf( stderr, "Device " );
    dump_object_name( &device->obj );
    fputc( '\n', stderr );
}

static struct object_type *device_get_type( struct object *obj )
{
    static const WCHAR name[] = {'D','e','v','i','c','e'};
    static const struct unicode_str str = { name, sizeof(name) };
    return get_object_type( &str );
}

static struct fd *device_get_fd( struct object *obj )
{
    struct device *device = (struct device *)obj;

    return (struct fd *)grab_object( device->fd );
}

static void device_destroy( struct object *obj )
{
    struct device *device = (struct device *)obj;
    struct irp_call *irp, *next;

    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &device->requests, struct irp_call, dev_entry )
    {
        list_remove( &irp->dev_entry );
        release_object( irp );  /* no longer on the device queue */
    }
    if (device->fd) release_object( device->fd );
    if (device->manager) list_remove( &device->entry );
}

static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options )
{
    return grab_object( obj );
}

static enum server_fd_type device_get_fd_type( struct fd *fd )
{
    return FD_TYPE_DEVICE;
}

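/* find a pending irp for the given thread and user arg */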
static struct irp_call *find_irp_call( struct device *device, struct thread *thread,
                                       client_ptr_t user_arg )
{
    struct irp_call *irp;

    LIST_FOR_EACH_ENTRY( irp, &device->requests, struct irp_call, dev_entry )
        if (irp->thread == thread && irp->user_arg == user_arg) return irp;

    set_error( STATUS_INVALID_PARAMETER );
    return NULL;
}

/* queue an irp to the device */
static obj_handle_t queue_irp( struct device *device, struct irp_call *irp,
                               const async_data_t *async_data, int blocking )
{
    obj_handle_t handle = 0;

    if (blocking && !(handle = alloc_handle( current->process, irp, SYNCHRONIZE, 0 ))) return 0;

    if (!(irp->async = fd_queue_async( device->fd, async_data, ASYNC_TYPE_WAIT )))
    {
        if (handle) close_handle( current->process, handle );
        return 0;
    }
    irp->thread   = (struct thread *)grab_object( current );
    irp->user_arg = async_data->arg;
    grab_object( irp );  /* grab reference for queued irp */

    list_add_tail( &device->requests, &irp->dev_entry );
    list_add_tail( &device->manager->requests, &irp->mgr_entry );
    if (list_head( &device->manager->requests ) == &irp->mgr_entry)  /* first one */
        wake_up( &device->manager->obj, 0 );
    set_error( STATUS_PENDING );
    return handle;
}

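/* queue a read request on the device as an IRP_MJ_READ irp */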
static obj_handle_t device_read( struct fd *fd, const async_data_t *async_data, int blocking,
                                 file_pos_t pos )
{
    struct device *device = get_fd_user( fd );
    struct irp_call *irp;
    obj_handle_t handle;
    irp_params_t params;

    params.major    = IRP_MJ_READ;
    params.read.key = 0;
    params.read.pos = pos;

    irp = create_irp( device, &params, NULL, 0, get_reply_max_size() );
    if (!irp) return 0;

    handle = queue_irp( device, irp, async_data, blocking );
    release_object( irp );
    return handle;
}

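/* queue a write request on the device as an IRP_MJ_WRITE irp */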
static obj_handle_t device_write( struct fd *fd, const async_data_t *async_data, int blocking,
                                  file_pos_t pos, data_size_t *written )
{
    struct device *device = get_fd_user( fd );
    struct irp_call *irp;
    obj_handle_t handle;
    irp_params_t params;

    params.major     = IRP_MJ_WRITE;
    params.write.key = 0;
    params.write.pos = pos;

    irp = create_irp( device, &params, get_req_data(), get_req_data_size(), 0 );
    if (!irp) return 0;

    handle = queue_irp( device, irp, async_data, blocking );
    release_object( irp );
    return handle;
}

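/* queue a flush request on the device as an IRP_MJ_FLUSH_BUFFERS irp */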
static obj_handle_t device_flush( struct fd *fd, const async_data_t *async_data, int blocking )
{
    struct device *device = get_fd_user( fd );
    struct irp_call *irp;
    obj_handle_t handle;
    irp_params_t params;

    params.major = IRP_MJ_FLUSH_BUFFERS;

    irp = create_irp( device, &params, NULL, 0, 0 );
    if (!irp) return 0;

    handle = queue_irp( device, irp, async_data, blocking );
    release_object( irp );
    return handle;
}

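/* queue an ioctl request on the device as an IRP_MJ_DEVICE_CONTROL irp */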
static obj_handle_t device_ioctl( struct fd *fd, ioctl_code_t code, const async_data_t *async_data,
                                  int blocking )
{
    struct device *device = get_fd_user( fd );
    struct irp_call *irp;
    obj_handle_t handle;
    irp_params_t params;

    params.major      = IRP_MJ_DEVICE_CONTROL;
    params.ioctl.code = code;

    irp = create_irp( device, &params, get_req_data(), get_req_data_size(),
                      get_reply_max_size() );
    if (!irp) return 0;

    handle = queue_irp( device, irp, async_data, blocking );
    release_object( irp );
    return handle;
}

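/* create a device and add it to its manager's list */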
static struct device *create_device( struct directory *root, const struct unicode_str *name,
                                     struct device_manager *manager, unsigned int attr )
{
    struct device *device;

    if ((device = create_named_object_dir( root, name, attr, &device_ops )))
    {
        if (get_error() != STATUS_OBJECT_NAME_EXISTS)
        {
            /* initialize it if it didn't already exist */
            device->manager = manager;
            list_add_tail( &manager->devices, &device->entry );
            list_init( &device->requests );
            if (!(device->fd = alloc_pseudo_fd( &device_fd_ops, &device->obj, 0 )))
            {
                release_object( device );
                device = NULL;
            }
        }
    }
    return device;
}

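/* mark a device as deleted and terminate all its pending irps */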
static void delete_device( struct device *device )
{
    struct irp_call *irp, *next;

    if (!device->manager) return;  /* already deleted */

    /* terminate all pending requests */
    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &device->requests, struct irp_call, dev_entry )
    {
        list_remove( &irp->mgr_entry );
        set_irp_result( irp, STATUS_FILE_DELETED, NULL, 0, 0 );
    }
    unlink_named_object( &device->obj );
    list_remove( &device->entry );
    device->manager = NULL;
}

static void device_manager_dump( struct object *obj, int verbose )
{
    fprintf( stderr, "Device manager\n" );
}

static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry )
{
    struct device_manager *manager = (struct device_manager *)obj;

    return !list_empty( &manager->requests );
}

static void device_manager_destroy( struct object *obj )
{
    struct device_manager *manager = (struct device_manager *)obj;
    struct list *ptr;

    while ((ptr = list_head( &manager->devices )))
    {
        struct device *device = LIST_ENTRY( ptr, struct device, entry );
        delete_device( device );
    }
}

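/* allocate a device manager object */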
static struct device_manager *create_device_manager(void)
{
    struct device_manager *manager;

    if ((manager = alloc_object( &device_manager_ops )))
    {
        list_init( &manager->devices );
        list_init( &manager->requests );
    }
    return manager;
}

/* create a device manager */
DECL_HANDLER(create_device_manager)
{
    struct device_manager *manager = create_device_manager();

    if (manager)
    {
        reply->handle = alloc_handle( current->process, manager, req->access, req->attributes );
        release_object( manager );
    }
}

/* create a device */
DECL_HANDLER(create_device)
{
    struct device *device;
    struct unicode_str name;
    struct device_manager *manager;
    struct directory *root = NULL;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    get_req_unicode_str( &name );
    if (req->rootdir && !(root = get_directory_obj( current->process, req->rootdir, 0 )))
    {
        release_object( manager );
        return;
    }

    if ((device = create_device( root, &name, manager, req->attributes )))
    {
        device->user_ptr = req->user_ptr;
        reply->handle = alloc_handle( current->process, device, req->access, req->attributes );
        release_object( device );
    }

    if (root) release_object( root );
    release_object( manager );
}

/* delete a device */
DECL_HANDLER(delete_device)
{
    struct device *device;

    if ((device = (struct device *)get_handle_obj( current->process, req->handle, 0, &device_ops )))
    {
        delete_device( device );
        release_object( device );
    }
}

/* retrieve the next pending device irp request */
DECL_HANDLER(get_next_device_request)
{
    struct irp_call *irp;
    struct device_manager *manager;
    struct list *ptr;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if (req->prev)
    {
        if ((irp = (struct irp_call *)get_handle_obj( current->process, req->prev,
                                                      0, &irp_call_ops )))
        {
            set_irp_result( irp, req->status, NULL, 0, 0 );
            close_handle( current->process, req->prev );  /* avoid an extra round-trip for close */
            release_object( irp );
        }
        clear_error();
    }

    if ((ptr = list_head( &manager->requests )))
    {
        irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );
        reply->params = irp->params;
        reply->user_ptr = irp->device->user_ptr;
        reply->client_pid = get_process_id( irp->thread->process );
        reply->client_tid = get_thread_id( irp->thread );
        reply->in_size = irp->in_size;
        reply->out_size = irp->out_size;
        if (irp->in_size > get_reply_max_size()) set_error( STATUS_BUFFER_OVERFLOW );
        else if ((reply->next = alloc_handle( current->process, irp, 0, 0 )))
        {
            set_reply_data_ptr( irp->in_data, irp->in_size );
            irp->in_data = NULL;
            irp->in_size = 0;
            list_remove( &irp->mgr_entry );
            list_init( &irp->mgr_entry );
        }
    }
    else set_error( STATUS_PENDING );

    release_object( manager );
}

/* store results of an async irp */
DECL_HANDLER(set_irp_result)
{
    struct irp_call *irp;
    struct device_manager *manager;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((irp = (struct irp_call *)get_handle_obj( current->process, req->handle, 0, &irp_call_ops )))
    {
        set_irp_result( irp, req->status, get_req_data(), get_req_data_size(), req->size );
        close_handle( current->process, req->handle );  /* avoid an extra round-trip for close */
        release_object( irp );
    }
    release_object( manager );
}

/* retrieve results of an async irp */
DECL_HANDLER(get_irp_result)
{
    struct device *device;
    struct irp_call *irp;

    if (!(device = (struct device *)get_handle_obj( current->process, req->handle, 0, &device_ops )))
        return;

    if ((irp = find_irp_call( device, current, req->user_arg )))
    {
        if (irp->out_data)
        {
            data_size_t size = min( irp->out_size, get_reply_max_size() );
            if (size)
            {
                set_reply_data_ptr( irp->out_data, size );
                irp->out_data = NULL;
            }
        }
        reply->size = irp->result;
        set_error( irp->status );
        list_remove( &irp->dev_entry );
        release_object( irp );  /* no longer on the device queue */
    }
    release_object( device );
}