/*
 * Server-side device support
 *
 * Copyright (C) 2007 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"
#include "winternl.h"
#include "ddk/wdm.h"

#include "object.h"
#include "file.h"
#include "handle.h"
#include "request.h"
#include "process.h"

struct irp_call
{
    struct object          obj;           /* object header */
    struct list            dev_entry;     /* entry in device queue */
    struct list            mgr_entry;     /* entry in manager queue */
    struct device_file    *file;          /* file containing this irp */
    struct thread         *thread;        /* thread that queued the irp */
    client_ptr_t           user_arg;      /* user arg used to identify the request */
    struct async          *async;         /* pending async op */
    unsigned int           status;        /* resulting status (or STATUS_PENDING) */
    irp_params_t           params;        /* irp parameters */
    data_size_t            result;        /* size of result (input or output depending on the type) */
    data_size_t            in_size;       /* size of input data */
    void                  *in_data;       /* input data */
    data_size_t            out_size;      /* size of output data */
    void                  *out_data;      /* output data */
};

static void irp_call_dump( struct object *obj, int verbose );
static int irp_call_signaled( struct object *obj, struct wait_queue_entry *entry );
static void irp_call_destroy( struct object *obj );

static const struct object_ops irp_call_ops =
{
    sizeof(struct irp_call),          /* size */
    irp_call_dump,                    /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    irp_call_signaled,                /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    no_map_access,                    /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    no_close_handle,                  /* close_handle */
    irp_call_destroy                  /* destroy */
};

/* device manager (a list of devices managed by the same client process) */

struct device_manager
{
    struct object          obj;           /* object header */
    struct list            devices;       /* list of devices */
    struct list            requests;      /* list of pending irps across all devices */
};

static void device_manager_dump( struct object *obj, int verbose );
static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry );
static void device_manager_destroy( struct object *obj );

static const struct object_ops device_manager_ops =
{
    sizeof(struct device_manager),    /* size */
    device_manager_dump,              /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    device_manager_signaled,          /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    no_map_access,                    /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    no_close_handle,                  /* close_handle */
    device_manager_destroy            /* destroy */
};

/* device (a single device object) */

struct device
{
    struct object          obj;           /* object header */
    struct device_manager *manager;       /* manager for this device (or NULL if deleted) */
    char                  *unix_path;     /* path to unix device if any */
    client_ptr_t           user_ptr;      /* opaque ptr for client side */
    struct list            entry;         /* entry in device manager list */
    struct list            files;         /* list of open files */
};

static void device_dump( struct object *obj, int verbose );
static struct object_type *device_get_type( struct object *obj );
static void device_destroy( struct object *obj );
static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options );

static const struct object_ops device_ops =
{
    sizeof(struct device),            /* size */
    device_dump,                      /* dump */
    device_get_type,                  /* get_type */
    no_add_queue,                     /* add_queue */
    NULL,                             /* remove_queue */
    NULL,                             /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    default_fd_map_access,            /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    directory_link_name,              /* link_name */
    default_unlink_name,              /* unlink_name */
    device_open_file,                 /* open_file */
    no_close_handle,                  /* close_handle */
    device_destroy                    /* destroy */
};

/* device file (an open file handle to a device) */

struct device_file
{
    struct object          obj;           /* object header */
    struct device         *device;        /* device for this file */
    struct fd             *fd;            /* file descriptor for irp */
    client_ptr_t           user_ptr;      /* opaque ptr for client side */
    struct list            entry;         /* entry in device list */
    struct list            requests;      /* list of pending irp requests */
};

static void device_file_dump( struct object *obj, int verbose );
static struct fd *device_file_get_fd( struct object *obj );
static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
static void device_file_destroy( struct object *obj );
static enum server_fd_type device_file_get_fd_type( struct fd *fd );
static obj_handle_t device_file_read( struct fd *fd, const async_data_t *async_data, int blocking,
                                      file_pos_t pos );
static obj_handle_t device_file_write( struct fd *fd, const async_data_t *async_data, int blocking,
                                       file_pos_t pos, data_size_t *written );
static obj_handle_t device_file_flush( struct fd *fd, const async_data_t *async_data, int blocking );
static obj_handle_t device_file_ioctl( struct fd *fd, ioctl_code_t code, const async_data_t *async_data,
                                       int blocking );

static const struct object_ops device_file_ops =
{
    sizeof(struct device_file),       /* size */
    device_file_dump,                 /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    default_fd_signaled,              /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    device_file_get_fd,               /* get_fd */
    default_fd_map_access,            /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    device_file_close_handle,         /* close_handle */
    device_file_destroy               /* destroy */
};

static const struct fd_ops device_file_fd_ops =
{
    default_fd_get_poll_events,       /* get_poll_events */
    default_poll_event,               /* poll_event */
    device_file_get_fd_type,          /* get_fd_type */
    device_file_read,                 /* read */
    device_file_write,                /* write */
    device_file_flush,                /* flush */
    device_file_ioctl,                /* ioctl */
    default_fd_queue_async,           /* queue_async */
    default_fd_reselect_async,        /* reselect_async */
    default_fd_cancel_async           /* cancel_async */
};

static void irp_call_dump( struct object *obj, int verbose )
{
    struct irp_call *irp = (struct irp_call *)obj;
    fprintf( stderr, "IRP call file=%p\n", irp->file );
}

static int irp_call_signaled( struct object *obj, struct wait_queue_entry *entry )
{
    struct irp_call *irp = (struct irp_call *)obj;

    return !irp->file;  /* file is cleared once the irp has completed */
}

static void irp_call_destroy( struct object *obj )
{
    struct irp_call *irp = (struct irp_call *)obj;

    free( irp->in_data );
    free( irp->out_data );
    if (irp->async)
    {
        async_terminate( irp->async, STATUS_CANCELLED );
        release_object( irp->async );
    }
    if (irp->file) release_object( irp->file );
    if (irp->thread) release_object( irp->thread );
}

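/* allocate an irp for a request on the given device file, duplicating the input data */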
static struct irp_call *create_irp( struct device_file *file, const irp_params_t *params,
                                    const void *in_data, data_size_t in_size, data_size_t out_size )
{
    struct irp_call *irp;

    if (!file->device->manager)  /* it has been deleted */
    {
        set_error( STATUS_FILE_DELETED );
        return NULL;
    }

    if ((irp = alloc_object( &irp_call_ops )))
    {
        irp->file     = (struct device_file *)grab_object( file );
        irp->thread   = NULL;
        irp->async    = NULL;
        irp->params   = *params;
        irp->status   = STATUS_PENDING;
        irp->result   = 0;
        irp->in_size  = in_size;
        irp->in_data  = NULL;
        irp->out_size = out_size;
        irp->out_data = NULL;

        if (irp->in_size && !(irp->in_data = memdup( in_data, in_size )))
        {
            release_object( irp );
            irp = NULL;
        }
    }
    return irp;
}

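/* store the result of an irp, wake up any waiters and detach the irp from its file */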
static void set_irp_result( struct irp_call *irp, unsigned int status,
                            const void *out_data, data_size_t out_size, data_size_t result )
{
    struct device_file *file = irp->file;

    if (!file) return;  /* already finished */

    /* FIXME: handle the STATUS_PENDING case */
    irp->status = status;
    irp->result = result;
    irp->out_size = min( irp->out_size, out_size );
    if (irp->out_size && !(irp->out_data = memdup( out_data, irp->out_size )))
        irp->out_size = 0;
    irp->file = NULL;
    if (irp->async)
    {
        if (result) status = STATUS_ALERTED;
        async_terminate( irp->async, status );
        release_object( irp->async );
        irp->async = NULL;
    }
    wake_up( &irp->obj, 0 );

    if (status != STATUS_ALERTED)
    {
        /* remove it from the device queue */
        /* (for STATUS_ALERTED this will be done in get_irp_result) */
        list_remove( &irp->dev_entry );
        release_object( irp );  /* no longer on the device queue */
    }
    release_object( file );
}

static void device_dump( struct object *obj, int verbose )
{
    fputs( "Device\n", stderr );
}

static struct object_type *device_get_type( struct object *obj )
{
    static const WCHAR name[] = {'D','e','v','i','c','e'};
    static const struct unicode_str str = { name, sizeof(name) };
    return get_object_type( &str );
}

static void device_destroy( struct object *obj )
{
    struct device *device = (struct device *)obj;

    assert( list_empty( &device->files ));

    free( device->unix_path );
    if (device->manager) list_remove( &device->entry );
}

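/* add an irp to the queues of both the file and its device manager, waking the manager if it was idle */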
static void add_irp_to_queue( struct device_file *file, struct irp_call *irp, struct thread *thread )
{
    struct device_manager *manager = file->device->manager;

    assert( manager );

    grab_object( irp );  /* grab reference for queued irp */
    irp->thread = thread ? (struct thread *)grab_object( thread ) : NULL;
    list_add_tail( &file->requests, &irp->dev_entry );
    list_add_tail( &manager->requests, &irp->mgr_entry );
    if (list_head( &manager->requests ) == &irp->mgr_entry)  wake_up( &manager->obj, 0 );  /* first one */
}

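/* open a file object on the device; if the device has a manager, also queue a create irp to the driver */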
static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options )
{
    struct device *device = (struct device *)obj;
    struct device_file *file;

    if (!(file = alloc_object( &device_file_ops ))) return NULL;

    file->device = (struct device *)grab_object( device );
    file->user_ptr = 0;
    list_init( &file->requests );
    list_add_tail( &device->files, &file->entry );
    if (device->unix_path)
    {
        mode_t mode = 0666;
        access = file->obj.ops->map_access( &file->obj, access );
        file->fd = open_fd( NULL, device->unix_path, O_NONBLOCK | O_LARGEFILE,
                            &mode, access, sharing, options );
        if (file->fd) set_fd_user( file->fd, &device_file_fd_ops, &file->obj );
    }
    else file->fd = alloc_pseudo_fd( &device_file_fd_ops, &file->obj, 0 );

    if (!file->fd)
    {
        release_object( file );
        return NULL;
    }

    allow_fd_caching( file->fd );

    if (device->manager)
    {
        struct irp_call *irp;
        irp_params_t params;

        memset( &params, 0, sizeof(params) );
        params.create.major   = IRP_MJ_CREATE;
        params.create.access  = access;
        params.create.sharing = sharing;
        params.create.options = options;
        params.create.device  = file->device->user_ptr;

        if ((irp = create_irp( file, &params, NULL, 0, 0 )))
        {
            add_irp_to_queue( file, irp, NULL );
            release_object( irp );
        }
    }
    return &file->obj;
}

static void device_file_dump( struct object *obj, int verbose )
{
    struct device_file *file = (struct device_file *)obj;

    fprintf( stderr, "File on device %p\n", file->device );
}

static struct fd *device_file_get_fd( struct object *obj )
{
    struct device_file *file = (struct device_file *)obj;

    return (struct fd *)grab_object( file->fd );
}

static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
{
    struct device_file *file = (struct device_file *)obj;

    if (file->device->manager && obj->handle_count == 1)  /* last handle */
    {
        struct irp_call *irp;
        irp_params_t params;

        memset( &params, 0, sizeof(params) );
        params.close.major = IRP_MJ_CLOSE;
        params.close.file  = file->user_ptr;

        if ((irp = create_irp( file, &params, NULL, 0, 0 )))
        {
            add_irp_to_queue( file, irp, NULL );
            release_object( irp );
        }
    }
    return 1;
}

static void device_file_destroy( struct object *obj )
{
    struct device_file *file = (struct device_file *)obj;
    struct irp_call *irp, *next;

    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        list_remove( &irp->dev_entry );
        release_object( irp );  /* no longer on the device queue */
    }
    if (file->fd) release_object( file->fd );
    list_remove( &file->entry );
    release_object( file->device );
}

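/* find a queued irp for the given file, thread and user argument */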
static struct irp_call *find_irp_call( struct device_file *file, struct thread *thread,
                                       client_ptr_t user_arg )
{
    struct irp_call *irp;

    LIST_FOR_EACH_ENTRY( irp, &file->requests, struct irp_call, dev_entry )
        if (irp->thread == thread && irp->user_arg == user_arg) return irp;

    set_error( STATUS_INVALID_PARAMETER );
    return NULL;
}

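/* set the client-side pointer of a device file and propagate it to irps that are already queued */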
static void set_file_user_ptr( struct device_file *file, client_ptr_t ptr )
{
    struct irp_call *irp;

    if (file->user_ptr == ptr) return;  /* nothing to do */

    file->user_ptr = ptr;

    /* update already queued irps */
    LIST_FOR_EACH_ENTRY( irp, &file->requests, struct irp_call, dev_entry )
    {
        switch (irp->params.major)
        {
        case IRP_MJ_CLOSE:          irp->params.close.file = ptr; break;
        case IRP_MJ_READ:           irp->params.read.file  = ptr; break;
        case IRP_MJ_WRITE:          irp->params.write.file = ptr; break;
        case IRP_MJ_FLUSH_BUFFERS:  irp->params.flush.file = ptr; break;
        case IRP_MJ_DEVICE_CONTROL: irp->params.ioctl.file = ptr; break;
        }
    }
}

/* queue an irp to the device */
static obj_handle_t queue_irp( struct device_file *file, struct irp_call *irp,
                               const async_data_t *async_data, int blocking )
{
    obj_handle_t handle = 0;

    if (blocking && !(handle = alloc_handle( current->process, irp, SYNCHRONIZE, 0 ))) return 0;

    if (!(irp->async = fd_queue_async( file->fd, async_data, ASYNC_TYPE_WAIT )))
    {
        if (handle) close_handle( current->process, handle );
        return 0;
    }
    irp->user_arg = async_data->arg;
    add_irp_to_queue( file, irp, current );
    set_error( STATUS_PENDING );
    return handle;
}

static enum server_fd_type device_file_get_fd_type( struct fd *fd )
{
    return FD_TYPE_DEVICE;
}

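/* the read/write/flush/ioctl entry points below all package the request into an irp
 * and queue it to the driver through the device manager */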
static obj_handle_t device_file_read( struct fd *fd, const async_data_t *async_data, int blocking,
                                      file_pos_t pos )
{
    struct device_file *file = get_fd_user( fd );
    struct irp_call *irp;
    obj_handle_t handle;
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.read.major = IRP_MJ_READ;
    params.read.key   = 0;
    params.read.pos   = pos;
    params.read.file  = file->user_ptr;

    irp = create_irp( file, &params, NULL, 0, get_reply_max_size() );
    if (!irp) return 0;

    handle = queue_irp( file, irp, async_data, blocking );
    release_object( irp );
    return handle;
}

static obj_handle_t device_file_write( struct fd *fd, const async_data_t *async_data, int blocking,
                                       file_pos_t pos, data_size_t *written )
{
    struct device_file *file = get_fd_user( fd );
    struct irp_call *irp;
    obj_handle_t handle;
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.write.major = IRP_MJ_WRITE;
    params.write.key   = 0;
    params.write.pos   = pos;
    params.write.file  = file->user_ptr;

    irp = create_irp( file, &params, get_req_data(), get_req_data_size(), 0 );
    if (!irp) return 0;

    handle = queue_irp( file, irp, async_data, blocking );
    release_object( irp );
    return handle;
}

static obj_handle_t device_file_flush( struct fd *fd, const async_data_t *async_data, int blocking )
{
    struct device_file *file = get_fd_user( fd );
    struct irp_call *irp;
    obj_handle_t handle;
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.flush.major = IRP_MJ_FLUSH_BUFFERS;
    params.flush.file  = file->user_ptr;

    irp = create_irp( file, &params, NULL, 0, 0 );
    if (!irp) return 0;

    handle = queue_irp( file, irp, async_data, blocking );
    release_object( irp );
    return handle;
}

static obj_handle_t device_file_ioctl( struct fd *fd, ioctl_code_t code, const async_data_t *async_data,
                                       int blocking )
{
    struct device_file *file = get_fd_user( fd );
    struct irp_call *irp;
    obj_handle_t handle;
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.ioctl.major = IRP_MJ_DEVICE_CONTROL;
    params.ioctl.code  = code;
    params.ioctl.file  = file->user_ptr;

    irp = create_irp( file, &params, get_req_data(), get_req_data_size(),
                      get_reply_max_size() );
    if (!irp) return 0;

    handle = queue_irp( file, irp, async_data, blocking );
    release_object( irp );
    return handle;
}

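/* create a device object to be exposed through the given device manager */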
static struct device *create_device( struct object *root, const struct unicode_str *name,
                                     struct device_manager *manager, unsigned int attr )
{
    struct device *device;

    if ((device = create_named_object( root, &device_ops, name, attr, NULL )))
    {
        if (get_error() != STATUS_OBJECT_NAME_EXISTS)
        {
            /* initialize it if it didn't already exist */
            device->unix_path = NULL;
            device->manager = manager;
            list_add_tail( &manager->devices, &device->entry );
            list_init( &device->files );
        }
    }
    return device;
}

*create_unix_device( struct object
*root
, const struct unicode_str
*name
,
624 const char *unix_path
)
626 struct device
*device
;
628 if ((device
= create_named_object( root
, &device_ops
, name
, 0, NULL
)))
630 device
->unix_path
= strdup( unix_path
);
631 device
->manager
= NULL
; /* no manager, requests go straight to the Unix device */
632 list_init( &device
->files
);
/* terminate requests when the underlying device is deleted */
static void delete_file( struct device_file *file )
{
    struct irp_call *irp, *next;

    /* terminate all pending requests */
    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        list_remove( &irp->mgr_entry );
        set_irp_result( irp, STATUS_FILE_DELETED, NULL, 0, 0 );
    }
}

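/* delete a device, detaching it from its manager and terminating requests on its open files */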
static void delete_device( struct device *device )
{
    struct device_file *file, *next;

    if (!device->manager) return;  /* already deleted */

    LIST_FOR_EACH_ENTRY_SAFE( file, next, &device->files, struct device_file, entry )
        delete_file( file );

    unlink_named_object( &device->obj );
    list_remove( &device->entry );
    device->manager = NULL;
}

static void device_manager_dump( struct object *obj, int verbose )
{
    fprintf( stderr, "Device manager\n" );
}

static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry )
{
    struct device_manager *manager = (struct device_manager *)obj;

    return !list_empty( &manager->requests );
}

static void device_manager_destroy( struct object *obj )
{
    struct device_manager *manager = (struct device_manager *)obj;
    struct list *ptr;

    while ((ptr = list_head( &manager->devices )))
    {
        struct device *device = LIST_ENTRY( ptr, struct device, entry );
        delete_device( device );
    }
}

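/* allocate a new device manager object */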
static struct device_manager *create_device_manager(void)
{
    struct device_manager *manager;

    if ((manager = alloc_object( &device_manager_ops )))
    {
        list_init( &manager->devices );
        list_init( &manager->requests );
    }
    return manager;
}

/* create a device manager */
DECL_HANDLER(create_device_manager)
{
    struct device_manager *manager = create_device_manager();

    if (manager)
    {
        reply->handle = alloc_handle( current->process, manager, req->access, req->attributes );
        release_object( manager );
    }
}

/* create a device */
DECL_HANDLER(create_device)
{
    struct device *device;
    struct unicode_str name = get_req_unicode_str();
    struct device_manager *manager;
    struct object *root = NULL;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if (req->rootdir && !(root = get_directory_obj( current->process, req->rootdir )))
    {
        release_object( manager );
        return;
    }

    if ((device = create_device( root, &name, manager, req->attributes )))
    {
        device->user_ptr = req->user_ptr;
        reply->handle = alloc_handle( current->process, device, req->access, req->attributes );
        release_object( device );
    }

    if (root) release_object( root );
    release_object( manager );
}

/* delete a device */
DECL_HANDLER(delete_device)
{
    struct device *device;

    if ((device = (struct device *)get_handle_obj( current->process, req->handle, 0, &device_ops )))
    {
        delete_device( device );
        release_object( device );
    }
}

/* retrieve the next pending device irp request */
DECL_HANDLER(get_next_device_request)
{
    struct irp_call *irp;
    struct device_manager *manager;
    struct list *ptr;

    reply->params.major = IRP_MJ_MAXIMUM_FUNCTION + 1;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if (req->prev)
    {
        if ((irp = (struct irp_call *)get_handle_obj( current->process, req->prev, 0, &irp_call_ops )))
        {
            set_irp_result( irp, req->status, NULL, 0, 0 );
            close_handle( current->process, req->prev );  /* avoid an extra round-trip for close */
            release_object( irp );
        }
        clear_error();
    }

    if ((ptr = list_head( &manager->requests )))
    {
        irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );
        if (irp->thread)
        {
            reply->client_pid = get_process_id( irp->thread->process );
            reply->client_tid = get_thread_id( irp->thread );
        }
        reply->params = irp->params;
        reply->in_size = irp->in_size;
        reply->out_size = irp->out_size;
        if (irp->in_size > get_reply_max_size()) set_error( STATUS_BUFFER_OVERFLOW );
        else if ((reply->next = alloc_handle( current->process, irp, 0, 0 )))
        {
            set_reply_data_ptr( irp->in_data, irp->in_size );
            irp->in_data = NULL;
            irp->in_size = 0;
            list_remove( &irp->mgr_entry );
            list_init( &irp->mgr_entry );
        }
    }
    else set_error( STATUS_PENDING );

    release_object( manager );
}

/* store results of an async irp */
DECL_HANDLER(set_irp_result)
{
    struct irp_call *irp;

    if ((irp = (struct irp_call *)get_handle_obj( current->process, req->handle, 0, &irp_call_ops )))
    {
        if (irp->file) set_file_user_ptr( irp->file, req->file_ptr );
        set_irp_result( irp, req->status, get_req_data(), get_req_data_size(), req->size );
        close_handle( current->process, req->handle );  /* avoid an extra round-trip for close */
        release_object( irp );
    }
}

/* retrieve results of an async irp */
DECL_HANDLER(get_irp_result)
{
    struct device_file *file;
    struct irp_call *irp;

    if (!(file = (struct device_file *)get_handle_obj( current->process, req->handle,
                                                       0, &device_file_ops )))
        return;

    if ((irp = find_irp_call( file, current, req->user_arg )))
    {
        if (irp->out_data)
        {
            data_size_t size = min( irp->out_size, get_reply_max_size() );
            if (size)
            {
                set_reply_data_ptr( irp->out_data, size );
                irp->out_data = NULL;
            }
        }
        reply->size = irp->result;
        set_error( irp->status );
        list_remove( &irp->dev_entry );
        release_object( irp );  /* no longer on the device queue */
    }
    release_object( file );
}