/*
 * Server-side device support
 *
 * Copyright (C) 2007 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"
#include "winternl.h"
#include "ddk/wdm.h"

#include "object.h"
#include "file.h"
#include "handle.h"
#include "request.h"
#include "process.h"

struct irp_call
{
    struct object          obj;           /* object header */
    struct list            dev_entry;     /* entry in device queue */
    struct list            mgr_entry;     /* entry in manager queue */
    struct device_file    *file;          /* file containing this irp */
    struct thread         *thread;        /* thread that queued the irp */
    struct async          *async;         /* pending async op */
    irp_params_t           params;        /* irp parameters */
    struct iosb           *iosb;          /* I/O status block */
};

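/* Lifetime note (inferred from the grab_object/release_object calls below,
 * not from separate documentation): an irp_call is kept alive by one
 * reference for being on its file queue (dev_entry), plus one per handle the
 * driver process holds on it; the manager queue link (mgr_entry) does not
 * own a reference of its own. */
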
static void irp_call_dump( struct object *obj, int verbose );
static int irp_call_signaled( struct object *obj, struct wait_queue_entry *entry );
static void irp_call_destroy( struct object *obj );

static const struct object_ops irp_call_ops =
{
    sizeof(struct irp_call),          /* size */
    irp_call_dump,                    /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    irp_call_signaled,                /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    no_map_access,                    /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    no_close_handle,                  /* close_handle */
    irp_call_destroy                  /* destroy */
};

/* device manager (a list of devices managed by the same client process) */

struct device_manager
{
    struct object          obj;           /* object header */
    struct list            devices;       /* list of devices */
    struct list            requests;      /* list of pending irps across all devices */
};

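/* The manager object is signaled whenever its request queue is non-empty
 * (see device_manager_signaled and add_irp_to_queue below), so the client
 * process can wait on the manager handle and then drain the queue with
 * get_next_device_request. */
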
static void device_manager_dump( struct object *obj, int verbose );
static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry );
static void device_manager_destroy( struct object *obj );

static const struct object_ops device_manager_ops =
{
    sizeof(struct device_manager),    /* size */
    device_manager_dump,              /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    device_manager_signaled,          /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    no_map_access,                    /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    no_close_handle,                  /* close_handle */
    device_manager_destroy            /* destroy */
};

/* device (a single device object) */

struct device
{
    struct object          obj;           /* object header */
    struct device_manager *manager;       /* manager for this device (or NULL if deleted) */
    char                  *unix_path;     /* path to unix device if any */
    client_ptr_t           user_ptr;      /* opaque ptr for client side */
    struct list            entry;         /* entry in device manager list */
    struct list            files;         /* list of open files */
};

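/* Two kinds of devices coexist here: devices created through a device
 * manager, whose I/O is turned into IRPs for the client-side driver, and
 * unix-path devices created by create_unix_device(), which have no manager
 * and are backed directly by a Unix file descriptor. */
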
static void device_dump( struct object *obj, int verbose );
static struct object_type *device_get_type( struct object *obj );
static void device_destroy( struct object *obj );
static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options );

static const struct object_ops device_ops =
{
    sizeof(struct device),            /* size */
    device_dump,                      /* dump */
    device_get_type,                  /* get_type */
    no_add_queue,                     /* add_queue */
    NULL,                             /* remove_queue */
    NULL,                             /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    default_fd_map_access,            /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    directory_link_name,              /* link_name */
    default_unlink_name,              /* unlink_name */
    device_open_file,                 /* open_file */
    no_close_handle,                  /* close_handle */
    device_destroy                    /* destroy */
};

/* device file (an open file handle to a device) */

struct device_file
{
    struct object          obj;           /* object header */
    struct device         *device;        /* device for this file */
    struct fd             *fd;            /* file descriptor for irp */
    client_ptr_t           user_ptr;      /* opaque ptr for client side */
    struct list            entry;         /* entry in device list */
    struct list            requests;      /* list of pending irp requests */
};

static void device_file_dump( struct object *obj, int verbose );
static struct fd *device_file_get_fd( struct object *obj );
static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
static void device_file_destroy( struct object *obj );
static enum server_fd_type device_file_get_fd_type( struct fd *fd );
static int device_file_read( struct fd *fd, struct async *async, file_pos_t pos );
static int device_file_write( struct fd *fd, struct async *async, file_pos_t pos );
static int device_file_flush( struct fd *fd, struct async *async );
static int device_file_ioctl( struct fd *fd, ioctl_code_t code, struct async *async );

static const struct object_ops device_file_ops =
{
    sizeof(struct device_file),       /* size */
    device_file_dump,                 /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    default_fd_signaled,              /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    device_file_get_fd,               /* get_fd */
    default_fd_map_access,            /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    device_file_close_handle,         /* close_handle */
    device_file_destroy               /* destroy */
};

static const struct fd_ops device_file_fd_ops =
{
    default_fd_get_poll_events,       /* get_poll_events */
    default_poll_event,               /* poll_event */
    device_file_get_fd_type,          /* get_fd_type */
    device_file_read,                 /* read */
    device_file_write,                /* write */
    device_file_flush,                /* flush */
    no_fd_get_volume_info,            /* get_volume_info */
    device_file_ioctl,                /* ioctl */
    default_fd_queue_async,           /* queue_async */
    default_fd_reselect_async         /* reselect_async */
};

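/* Overview of the flow implemented below: an I/O request on a device file
 * (explicit read/write/flush/ioctl, or the implicit create/close) is packed
 * into an irp_call and queued on both the file and its device manager.  The
 * manager object becomes signaled, the driver process pulls the request with
 * get_next_device_request, services it, and reports the outcome either with
 * set_irp_result or through the 'prev' handle of its next
 * get_next_device_request call; that completes the async attached in
 * queue_irp.  A rough sketch of the driver-side loop, using hypothetical
 * helper names since that code lives outside this file:
 *
 *     prev = 0; status = 0;
 *     for (;;)
 *     {
 *         wait_on( manager );                                       // signaled while requests are pending
 *         next = get_next_device_request( manager, prev, status );  // completes 'prev', returns next irp
 *         status = dispatch_irp( next );                            // client-side driver work
 *         prev = next;
 *     }
 */
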
static void irp_call_dump( struct object *obj, int verbose )
{
    struct irp_call *irp = (struct irp_call *)obj;
    fprintf( stderr, "IRP call file=%p\n", irp->file );
}

static int irp_call_signaled( struct object *obj, struct wait_queue_entry *entry )
{
    struct irp_call *irp = (struct irp_call *)obj;

    return !irp->file;  /* file is cleared once the irp has completed */
}

static void irp_call_destroy( struct object *obj )
{
    struct irp_call *irp = (struct irp_call *)obj;

    if (irp->async)
    {
        async_terminate( irp->async, STATUS_CANCELLED );
        release_object( irp->async );
    }
    if (irp->iosb) release_object( irp->iosb );
    if (irp->file) release_object( irp->file );
    if (irp->thread) release_object( irp->thread );
}

static struct irp_call *create_irp( struct device_file *file, const irp_params_t *params, struct async *async )
{
    struct irp_call *irp;

    if (!file->device->manager)  /* it has been deleted */
    {
        set_error( STATUS_FILE_DELETED );
        return NULL;
    }

    if ((irp = alloc_object( &irp_call_ops )))
    {
        irp->file    = (struct device_file *)grab_object( file );
        irp->thread  = NULL;
        irp->async   = NULL;
        irp->params  = *params;
        irp->iosb    = NULL;

        if (async) irp->iosb = async_get_iosb( async );
        if (!irp->iosb && !(irp->iosb = create_iosb( NULL, 0, 0 )))
        {
            release_object( irp );
            irp = NULL;
        }
    }
    return irp;
}

static void set_irp_result( struct irp_call *irp, unsigned int status,
                            const void *out_data, data_size_t out_size, data_size_t result )
{
    struct device_file *file = irp->file;
    struct iosb *iosb = irp->iosb;

    if (!file) return;  /* already finished */

    /* FIXME: handle the STATUS_PENDING case */
    iosb->status = status;
    iosb->result = result;
    iosb->out_size = min( iosb->out_size, out_size );
    if (iosb->out_size && !(iosb->out_data = memdup( out_data, iosb->out_size )))
        iosb->out_size = 0;
    irp->file = NULL;
    if (irp->async)
    {
        if (result) status = STATUS_ALERTED;
        async_terminate( irp->async, status );
        release_object( irp->async );
        irp->async = NULL;
    }
    wake_up( &irp->obj, 0 );

    /* remove it from the device queue */
    list_remove( &irp->dev_entry );
    release_object( irp );  /* no longer on the device queue */
    release_object( file );
}

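/* Reading of the completion path above: out_size is clamped to what the
 * client asked for, and when a partial result is present the async is
 * terminated with STATUS_ALERTED rather than the real status, which appears
 * intended to let the async completion path still transfer the result data;
 * the real status stays in the iosb.  This is an interpretation of the code,
 * not an external specification. */
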
static void device_dump( struct object *obj, int verbose )
{
    fputs( "Device\n", stderr );
}

static struct object_type *device_get_type( struct object *obj )
{
    static const WCHAR name[] = {'D','e','v','i','c','e'};
    static const struct unicode_str str = { name, sizeof(name) };
    return get_object_type( &str );
}

static void device_destroy( struct object *obj )
{
    struct device *device = (struct device *)obj;

    assert( list_empty( &device->files ));

    free( device->unix_path );
    if (device->manager) list_remove( &device->entry );
}

static void add_irp_to_queue( struct device_file *file, struct irp_call *irp, struct thread *thread )
{
    struct device_manager *manager = file->device->manager;

    assert( manager );

    grab_object( irp );  /* grab reference for queued irp */
    irp->thread = thread ? (struct thread *)grab_object( thread ) : NULL;
    list_add_tail( &file->requests, &irp->dev_entry );
    list_add_tail( &manager->requests, &irp->mgr_entry );
    if (list_head( &manager->requests ) == &irp->mgr_entry) wake_up( &manager->obj, 0 );  /* first one */
}

static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options )
{
    struct device *device = (struct device *)obj;
    struct device_file *file;

    if (!(file = alloc_object( &device_file_ops ))) return NULL;

    file->device = (struct device *)grab_object( device );
    file->user_ptr = 0;
    list_init( &file->requests );
    list_add_tail( &device->files, &file->entry );
    if (device->unix_path)
    {
        mode_t mode = 0666;
        access = file->obj.ops->map_access( &file->obj, access );
        file->fd = open_fd( NULL, device->unix_path, O_NONBLOCK | O_LARGEFILE,
                            &mode, access, sharing, options );
        if (file->fd) set_fd_user( file->fd, &device_file_fd_ops, &file->obj );
    }
    else file->fd = alloc_pseudo_fd( &device_file_fd_ops, &file->obj, 0 );

    if (!file->fd)
    {
        release_object( file );
        return NULL;
    }

    allow_fd_caching( file->fd );

    if (device->manager)
    {
        struct irp_call *irp;
        irp_params_t params;

        memset( &params, 0, sizeof(params) );
        params.create.major   = IRP_MJ_CREATE;
        params.create.access  = access;
        params.create.sharing = sharing;
        params.create.options = options;
        params.create.device  = file->device->user_ptr;

        if ((irp = create_irp( file, &params, NULL )))
        {
            add_irp_to_queue( file, irp, NULL );
            release_object( irp );
        }
    }
    return &file->obj;
}

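/* Opening a device thus yields either a real Unix fd (when the device has a
 * unix_path) or a pseudo fd that only carries async state; in both cases, if
 * a device manager is attached, the driver is notified of the open through
 * an IRP_MJ_CREATE request queued with no waiting thread. */
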
static void device_file_dump( struct object *obj, int verbose )
{
    struct device_file *file = (struct device_file *)obj;

    fprintf( stderr, "File on device %p\n", file->device );
}

static struct fd *device_file_get_fd( struct object *obj )
{
    struct device_file *file = (struct device_file *)obj;

    return (struct fd *)grab_object( file->fd );
}

static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
{
    struct device_file *file = (struct device_file *)obj;

    if (file->device->manager && obj->handle_count == 1)  /* last handle */
    {
        struct irp_call *irp;
        irp_params_t params;

        memset( &params, 0, sizeof(params) );
        params.close.major = IRP_MJ_CLOSE;
        params.close.file  = file->user_ptr;

        if ((irp = create_irp( file, &params, NULL )))
        {
            add_irp_to_queue( file, irp, NULL );
            release_object( irp );
        }
    }
    return 1;
}

static void device_file_destroy( struct object *obj )
{
    struct device_file *file = (struct device_file *)obj;
    struct irp_call *irp, *next;

    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        list_remove( &irp->dev_entry );
        release_object( irp );  /* no longer on the device queue */
    }
    if (file->fd) release_object( file->fd );
    list_remove( &file->entry );
    release_object( file->device );
}

static void set_file_user_ptr( struct device_file *file, client_ptr_t ptr )
{
    struct irp_call *irp;

    if (file->user_ptr == ptr) return;  /* nothing to do */

    file->user_ptr = ptr;

    /* update already queued irps */
    LIST_FOR_EACH_ENTRY( irp, &file->requests, struct irp_call, dev_entry )
    {
        switch (irp->params.major)
        {
        case IRP_MJ_CLOSE:          irp->params.close.file = ptr; break;
        case IRP_MJ_READ:           irp->params.read.file  = ptr; break;
        case IRP_MJ_WRITE:          irp->params.write.file = ptr; break;
        case IRP_MJ_FLUSH_BUFFERS:  irp->params.flush.file = ptr; break;
        case IRP_MJ_DEVICE_CONTROL: irp->params.ioctl.file = ptr; break;
        }
    }
}

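/* The driver-side file pointer only becomes known once the driver has
 * answered an IRP for this file (the set_irp_result handler below passes
 * req->file_ptr to this function), so requests that were queued before that
 * answer are patched up here to carry a consistent file pointer. */
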
/* queue an irp to the device */
static int queue_irp( struct device_file *file, const irp_params_t *params, struct async *async )
{
    struct irp_call *irp = create_irp( file, params, async );
    if (!irp) return 0;

    fd_queue_async( file->fd, async, ASYNC_TYPE_WAIT );
    irp->async = (struct async *)grab_object( async );
    add_irp_to_queue( file, irp, current );
    release_object( irp );
    set_error( STATUS_PENDING );
    return 0;
}

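/* On success queue_irp() always reports STATUS_PENDING: the real completion
 * status arrives later through set_irp_result(), which terminates the async
 * attached here. */
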
static enum server_fd_type device_file_get_fd_type( struct fd *fd )
{
    return FD_TYPE_DEVICE;
}

static int device_file_read( struct fd *fd, struct async *async, file_pos_t pos )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.read.major = IRP_MJ_READ;
    params.read.key   = 0;
    params.read.pos   = pos;
    params.read.file  = file->user_ptr;
    return queue_irp( file, &params, async );
}

static int device_file_write( struct fd *fd, struct async *async, file_pos_t pos )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.write.major = IRP_MJ_WRITE;
    params.write.key   = 0;
    params.write.pos   = pos;
    params.write.file  = file->user_ptr;
    return queue_irp( file, &params, async );
}

static int device_file_flush( struct fd *fd, struct async *async )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.flush.major = IRP_MJ_FLUSH_BUFFERS;
    params.flush.file  = file->user_ptr;
    return queue_irp( file, &params, NULL );
}

static int device_file_ioctl( struct fd *fd, ioctl_code_t code, struct async *async )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.ioctl.major = IRP_MJ_DEVICE_CONTROL;
    params.ioctl.code  = code;
    params.ioctl.file  = file->user_ptr;
    return queue_irp( file, &params, async );
}

static struct device *create_device( struct object *root, const struct unicode_str *name,
                                     struct device_manager *manager, unsigned int attr )
{
    struct device *device;

    if ((device = create_named_object( root, &device_ops, name, attr, NULL )))
    {
        if (get_error() != STATUS_OBJECT_NAME_EXISTS)
        {
            /* initialize it if it didn't already exist */
            device->unix_path = NULL;
            device->manager = manager;
            list_add_tail( &manager->devices, &device->entry );
            list_init( &device->files );
        }
    }
    return device;
}

struct object *create_unix_device( struct object *root, const struct unicode_str *name,
                                   const char *unix_path )
{
    struct device *device;

    if ((device = create_named_object( root, &device_ops, name, 0, NULL )))
    {
        device->unix_path = strdup( unix_path );
        device->manager = NULL;  /* no manager, requests go straight to the Unix device */
        list_init( &device->files );
    }
    return &device->obj;
}

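/* Unix devices are the degenerate case: with no manager attached,
 * device_open_file() opens unix_path directly and no IRPs are ever queued
 * for them. */
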
/* terminate requests when the underlying device is deleted */
static void delete_file( struct device_file *file )
{
    struct irp_call *irp, *next;

    /* terminate all pending requests */
    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        list_remove( &irp->mgr_entry );
        set_irp_result( irp, STATUS_FILE_DELETED, NULL, 0, 0 );
    }
}

static void delete_device( struct device *device )
{
    struct device_file *file, *next;

    if (!device->manager) return;  /* already deleted */

    LIST_FOR_EACH_ENTRY_SAFE( file, next, &device->files, struct device_file, entry )
        delete_file( file );

    unlink_named_object( &device->obj );
    list_remove( &device->entry );
    device->manager = NULL;
}

static void device_manager_dump( struct object *obj, int verbose )
{
    fprintf( stderr, "Device manager\n" );
}

static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry )
{
    struct device_manager *manager = (struct device_manager *)obj;

    return !list_empty( &manager->requests );
}

static void device_manager_destroy( struct object *obj )
{
    struct device_manager *manager = (struct device_manager *)obj;
    struct list *ptr;

    while ((ptr = list_head( &manager->devices )))
    {
        struct device *device = LIST_ENTRY( ptr, struct device, entry );
        delete_device( device );
    }
}

static struct device_manager *create_device_manager(void)
{
    struct device_manager *manager;

    if ((manager = alloc_object( &device_manager_ops )))
    {
        list_init( &manager->devices );
        list_init( &manager->requests );
    }
    return manager;
}

/* create a device manager */
DECL_HANDLER(create_device_manager)
{
    struct device_manager *manager = create_device_manager();

    if (manager)
    {
        reply->handle = alloc_handle( current->process, manager, req->access, req->attributes );
        release_object( manager );
    }
}

/* create a device */
DECL_HANDLER(create_device)
{
    struct device *device;
    struct unicode_str name = get_req_unicode_str();
    struct device_manager *manager;
    struct object *root = NULL;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if (req->rootdir && !(root = get_directory_obj( current->process, req->rootdir )))
    {
        release_object( manager );
        return;
    }

    if ((device = create_device( root, &name, manager, req->attributes )))
    {
        device->user_ptr = req->user_ptr;
        reply->handle = alloc_handle( current->process, device, req->access, req->attributes );
        release_object( device );
    }

    if (root) release_object( root );
    release_object( manager );
}

/* delete a device */
DECL_HANDLER(delete_device)
{
    struct device *device;

    if ((device = (struct device *)get_handle_obj( current->process, req->handle, 0, &device_ops )))
    {
        delete_device( device );
        release_object( device );
    }
}

/* retrieve the next pending device irp request */
DECL_HANDLER(get_next_device_request)
{
    struct irp_call *irp;
    struct device_manager *manager;
    struct list *ptr;
    struct iosb *iosb;

    reply->params.major = IRP_MJ_MAXIMUM_FUNCTION + 1;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if (req->prev)
    {
        if ((irp = (struct irp_call *)get_handle_obj( current->process, req->prev, 0, &irp_call_ops )))
        {
            set_irp_result( irp, req->status, NULL, 0, 0 );
            close_handle( current->process, req->prev );  /* avoid an extra round-trip for close */
            release_object( irp );
        }
        clear_error();
    }

    if ((ptr = list_head( &manager->requests )))
    {
        irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );
        if (irp->thread)
        {
            reply->client_pid = get_process_id( irp->thread->process );
            reply->client_tid = get_thread_id( irp->thread );
        }
        reply->params = irp->params;
        iosb = irp->iosb;
        reply->in_size = iosb->in_size;
        reply->out_size = iosb->out_size;
        if (iosb->in_size > get_reply_max_size()) set_error( STATUS_BUFFER_OVERFLOW );
        else if ((reply->next = alloc_handle( current->process, irp, 0, 0 )))
        {
            set_reply_data_ptr( iosb->in_data, iosb->in_size );
            iosb->in_data = NULL;
            iosb->in_size = 0;
            list_remove( &irp->mgr_entry );
            list_init( &irp->mgr_entry );
        }
    }
    else set_error( STATUS_PENDING );

    release_object( manager );
}

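/* The prev/next handle pair lets the driver complete one request and fetch
 * the next in a single server round trip; a request handed out this way is
 * unlinked from the manager queue but stays on its file queue (keeping its
 * reference) until set_irp_result finally completes it. */
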
/* store results of an async irp */
DECL_HANDLER(set_irp_result)
{
    struct irp_call *irp;

    if ((irp = (struct irp_call *)get_handle_obj( current->process, req->handle, 0, &irp_call_ops )))
    {
        if (irp->file) set_file_user_ptr( irp->file, req->file_ptr );
        set_irp_result( irp, req->status, get_req_data(), get_req_data_size(), req->size );
        close_handle( current->process, req->handle );  /* avoid an extra round-trip for close */
        release_object( irp );
    }
}