shell32/tests: Accept another error from SHFileOperationA.
[wine.git] / server / device.c
blob975507e42f99d9675df2c48044a7e908e7aebbd1
1 /*
2 * Server-side device support
4 * Copyright (C) 2007 Alexandre Julliard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
21 #include "config.h"
22 #include "wine/port.h"
24 #include <assert.h>
25 #include <fcntl.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <stdarg.h>
30 #include "ntstatus.h"
31 #define WIN32_NO_STATUS
32 #include "windef.h"
33 #include "winternl.h"
34 #include "ddk/wdm.h"
36 #include "object.h"
37 #include "file.h"
38 #include "handle.h"
39 #include "request.h"
40 #include "process.h"
/* IRP object */

/* One in-flight I/O request packet forwarded to a client-side driver.
 * While queued it sits on both the owning file's queue (dev_entry) and the
 * device manager's global queue (mgr_entry); 'file' is cleared by
 * set_irp_result() once the request has completed. */
struct irp_call
{
    struct object          obj;           /* object header */
    struct list            dev_entry;     /* entry in device queue */
    struct list            mgr_entry;     /* entry in manager queue */
    struct device_file    *file;          /* file containing this irp */
    struct thread         *thread;        /* thread that queued the irp */
    client_ptr_t           user_arg;      /* user arg used to identify the request */
    struct async          *async;         /* pending async op */
    irp_params_t           params;        /* irp parameters */
    struct iosb           *iosb;          /* I/O status block */
};
static void irp_call_dump( struct object *obj, int verbose );
static int irp_call_signaled( struct object *obj, struct wait_queue_entry *entry );
static void irp_call_destroy( struct object *obj );

/* object operations vtable for irp_call; irps are waitable (add_queue /
 * irp_call_signaled) so a client can block until the driver completes them */
static const struct object_ops irp_call_ops =
{
    sizeof(struct irp_call),          /* size */
    irp_call_dump,                    /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    irp_call_signaled,                /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    no_map_access,                    /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    no_close_handle,                  /* close_handle */
    irp_call_destroy                  /* destroy */
};
/* device manager (a list of devices managed by the same client process) */

/* 'requests' aggregates pending irps across all of this manager's devices;
 * the manager object becomes signaled while that list is non-empty */
struct device_manager
{
    struct object          obj;           /* object header */
    struct list            devices;       /* list of devices */
    struct list            requests;      /* list of pending irps across all devices */
};
static void device_manager_dump( struct object *obj, int verbose );
static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry );
static void device_manager_destroy( struct object *obj );

/* object operations vtable for device_manager; waitable so the driver
 * process can block until an irp request is queued */
static const struct object_ops device_manager_ops =
{
    sizeof(struct device_manager),    /* size */
    device_manager_dump,              /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    device_manager_signaled,          /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    no_map_access,                    /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    no_close_handle,                  /* close_handle */
    device_manager_destroy            /* destroy */
};
/* device (a single device object) */

/* A named device object.  'manager' is NULL either when the device was
 * deleted or when it is a plain Unix device (see create_unix_device), in
 * which case requests go straight to the fd instead of being queued. */
struct device
{
    struct object          obj;           /* object header */
    struct device_manager *manager;       /* manager for this device (or NULL if deleted) */
    char                  *unix_path;     /* path to unix device if any */
    client_ptr_t           user_ptr;      /* opaque ptr for client side */
    struct list            entry;         /* entry in device manager list */
    struct list            files;         /* list of open files */
};
static void device_dump( struct object *obj, int verbose );
static struct object_type *device_get_type( struct object *obj );
static void device_destroy( struct object *obj );
static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options );

/* object operations vtable for device; devices live in the object name
 * space (directory_link_name) and opening one creates a device_file */
static const struct object_ops device_ops =
{
    sizeof(struct device),            /* size */
    device_dump,                      /* dump */
    device_get_type,                  /* get_type */
    no_add_queue,                     /* add_queue */
    NULL,                             /* remove_queue */
    NULL,                             /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    default_fd_map_access,            /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    directory_link_name,              /* link_name */
    default_unlink_name,              /* unlink_name */
    device_open_file,                 /* open_file */
    no_close_handle,                  /* close_handle */
    device_destroy                    /* destroy */
};
/* device file (an open file handle to a device) */

/* One open handle to a device.  'fd' is either a real Unix fd (when the
 * device has a unix_path) or a pseudo fd; 'user_ptr' is the client-side
 * file pointer reported back through set_irp_result. */
struct device_file
{
    struct object          obj;           /* object header */
    struct device         *device;        /* device for this file */
    struct fd             *fd;            /* file descriptor for irp */
    client_ptr_t           user_ptr;      /* opaque ptr for client side */
    struct list            entry;         /* entry in device list */
    struct list            requests;      /* list of pending irp requests */
};
static void device_file_dump( struct object *obj, int verbose );
static struct fd *device_file_get_fd( struct object *obj );
static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
static void device_file_destroy( struct object *obj );
static enum server_fd_type device_file_get_fd_type( struct fd *fd );
static obj_handle_t device_file_read( struct fd *fd, const async_data_t *async_data, int blocking,
                                      file_pos_t pos );
static obj_handle_t device_file_write( struct fd *fd, const async_data_t *async_data, int blocking,
                                       file_pos_t pos, data_size_t *written );
static obj_handle_t device_file_flush( struct fd *fd, const async_data_t *async_data, int blocking );
static obj_handle_t device_file_ioctl( struct fd *fd, ioctl_code_t code, const async_data_t *async_data,
                                       int blocking );

/* object operations vtable for device_file; close_handle is hooked so an
 * IRP_MJ_CLOSE can be queued when the last handle goes away */
static const struct object_ops device_file_ops =
{
    sizeof(struct device_file),       /* size */
    device_file_dump,                 /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    default_fd_signaled,              /* signaled */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    device_file_get_fd,               /* get_fd */
    default_fd_map_access,            /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    device_file_close_handle,         /* close_handle */
    device_file_destroy               /* destroy */
};
/* fd operations vtable for device_file; read/write/flush/ioctl are routed
 * through the irp queue instead of the default fd implementations */
static const struct fd_ops device_file_fd_ops =
{
    default_fd_get_poll_events,       /* get_poll_events */
    default_poll_event,               /* poll_event */
    device_file_get_fd_type,          /* get_fd_type */
    device_file_read,                 /* read */
    device_file_write,                /* write */
    device_file_flush,                /* flush */
    device_file_ioctl,                /* ioctl */
    default_fd_queue_async,           /* queue_async */
    default_fd_reselect_async         /* reselect_async */
};
222 static void irp_call_dump( struct object *obj, int verbose )
224 struct irp_call *irp = (struct irp_call *)obj;
225 fprintf( stderr, "IRP call file=%p\n", irp->file );
228 static int irp_call_signaled( struct object *obj, struct wait_queue_entry *entry )
230 struct irp_call *irp = (struct irp_call *)obj;
232 return !irp->file; /* file is cleared once the irp has completed */
/* destructor for irp_call: cancel any still-pending async and drop the
 * references taken on the iosb, file and queuing thread */
static void irp_call_destroy( struct object *obj )
{
    struct irp_call *irp = (struct irp_call *)obj;

    if (irp->async)
    {
        /* the irp is going away before the driver completed it */
        async_terminate( irp->async, STATUS_CANCELLED );
        release_object( irp->async );
    }
    if (irp->iosb) release_object( irp->iosb );
    if (irp->file) release_object( irp->file );
    if (irp->thread) release_object( irp->thread );
}
/* allocate an irp for 'file' with the given parameters and an iosb holding
 * the input data; returns NULL (with last error set) on failure.
 * Fails with STATUS_FILE_DELETED when the device no longer has a manager. */
static struct irp_call *create_irp( struct device_file *file, const irp_params_t *params,
                                    const void *in_data, data_size_t in_size, data_size_t out_size )
{
    struct irp_call *irp;

    if (!file->device->manager)  /* it has been deleted */
    {
        set_error( STATUS_FILE_DELETED );
        return NULL;
    }

    if ((irp = alloc_object( &irp_call_ops )))
    {
        irp->file   = (struct device_file *)grab_object( file );
        irp->thread = NULL;
        irp->async  = NULL;
        irp->params = *params;

        /* the iosb carries the request input and reserves room for the output */
        if (!(irp->iosb = create_iosb( in_data, in_size, out_size )))
        {
            release_object( irp );
            irp = NULL;
        }
    }
    return irp;
}
/* complete an irp: store status/output into its iosb, wake waiters,
 * terminate the client async, and unlink the irp from the device queue.
 * Safe to call on an already-completed irp (irp->file is the guard). */
static void set_irp_result( struct irp_call *irp, unsigned int status,
                            const void *out_data, data_size_t out_size, data_size_t result )
{
    struct device_file *file = irp->file;
    struct iosb *iosb = irp->iosb;

    if (!file) return;  /* already finished */

    /* FIXME: handle the STATUS_PENDING case */
    iosb->status = status;
    iosb->result = result;
    /* never copy more than the client asked for */
    iosb->out_size = min( iosb->out_size, out_size );
    if (iosb->out_size && !(iosb->out_data = memdup( out_data, iosb->out_size )))
        iosb->out_size = 0;  /* out of memory: report no output rather than fail */
    irp->file = NULL;  /* mark completed before waking anyone up */
    if (irp->async)
    {
        /* STATUS_ALERTED tells the async machinery data was transferred */
        if (result) status = STATUS_ALERTED;
        async_terminate( irp->async, status );
        release_object( irp->async );
        irp->async = NULL;
    }
    wake_up( &irp->obj, 0 );

    /* remove it from the device queue */
    list_remove( &irp->dev_entry );
    release_object( irp );  /* no longer on the device queue */
    release_object( file );
}
/* print a short description of a device for server debugging output */
static void device_dump( struct object *obj, int verbose )
{
    fprintf( stderr, "Device\n" );
}
/* return the object type for devices ("Device"); note the unicode_str
 * length is a byte count and deliberately excludes any null terminator */
static struct object_type *device_get_type( struct object *obj )
{
    static const WCHAR name[] = {'D','e','v','i','c','e'};
    static const struct unicode_str str = { name, sizeof(name) };
    return get_object_type( &str );
}
/* destructor for device: all files must already be closed; unlink from the
 * manager unless the device was already deleted (manager == NULL) */
static void device_destroy( struct object *obj )
{
    struct device *device = (struct device *)obj;

    assert( list_empty( &device->files ));

    free( device->unix_path );
    if (device->manager) list_remove( &device->entry );
}
/* append an irp to both the file queue and the manager queue, grabbing a
 * reference for the queue and (optionally) recording the queuing thread;
 * wakes the manager if its queue was previously empty */
static void add_irp_to_queue( struct device_file *file, struct irp_call *irp, struct thread *thread )
{
    struct device_manager *manager = file->device->manager;

    assert( manager );

    grab_object( irp );  /* grab reference for queued irp */
    irp->thread = thread ? (struct thread *)grab_object( thread ) : NULL;
    list_add_tail( &file->requests, &irp->dev_entry );
    list_add_tail( &manager->requests, &irp->mgr_entry );
    /* wake up the manager only when this irp is the first pending one */
    if (list_head( &manager->requests ) == &irp->mgr_entry) wake_up( &manager->obj, 0 );
}
/* open a file object on the device: create the device_file, attach a real
 * Unix fd (when the device has a unix_path) or a pseudo fd, and queue an
 * IRP_MJ_CREATE request to the driver when the device has a manager */
static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options )
{
    struct device *device = (struct device *)obj;
    struct device_file *file;

    if (!(file = alloc_object( &device_file_ops ))) return NULL;

    file->device = (struct device *)grab_object( device );
    file->user_ptr = 0;
    list_init( &file->requests );
    list_add_tail( &device->files, &file->entry );
    if (device->unix_path)
    {
        mode_t mode = 0666;
        access = file->obj.ops->map_access( &file->obj, access );
        file->fd = open_fd( NULL, device->unix_path, O_NONBLOCK | O_LARGEFILE,
                            &mode, access, sharing, options );
        if (file->fd) set_fd_user( file->fd, &device_file_fd_ops, &file->obj );
    }
    else file->fd = alloc_pseudo_fd( &device_file_fd_ops, &file->obj, 0 );

    if (!file->fd)
    {
        /* releasing the file also unlinks it from device->files */
        release_object( file );
        return NULL;
    }

    allow_fd_caching( file->fd );

    if (device->manager)
    {
        struct irp_call *irp;
        irp_params_t params;

        memset( &params, 0, sizeof(params) );
        params.create.major   = IRP_MJ_CREATE;
        params.create.access  = access;
        params.create.sharing = sharing;
        params.create.options = options;
        params.create.device  = file->device->user_ptr;

        /* a failed create irp is not fatal for the open itself */
        if ((irp = create_irp( file, &params, NULL, 0, 0 )))
        {
            add_irp_to_queue( file, irp, NULL );
            release_object( irp );
        }
    }
    return &file->obj;
}
393 static void device_file_dump( struct object *obj, int verbose )
395 struct device_file *file = (struct device_file *)obj;
397 fprintf( stderr, "File on device %p\n", file->device );
400 static struct fd *device_file_get_fd( struct object *obj )
402 struct device_file *file = (struct device_file *)obj;
404 return (struct fd *)grab_object( file->fd );
/* close-handle hook: when the last handle to a managed device file goes
 * away, queue an IRP_MJ_CLOSE so the driver can clean up; always allows
 * the close to proceed (returns 1) */
static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
{
    struct device_file *file = (struct device_file *)obj;

    if (file->device->manager && obj->handle_count == 1)  /* last handle */
    {
        struct irp_call *irp;
        irp_params_t params;

        memset( &params, 0, sizeof(params) );
        params.close.major = IRP_MJ_CLOSE;
        params.close.file  = file->user_ptr;

        /* best effort: a failed close irp does not block the handle close */
        if ((irp = create_irp( file, &params, NULL, 0, 0 )))
        {
            add_irp_to_queue( file, irp, NULL );
            release_object( irp );
        }
    }
    return 1;
}
/* destructor for device_file: drop the queue references of any irps still
 * pending on this file, then release the fd and the device */
static void device_file_destroy( struct object *obj )
{
    struct device_file *file = (struct device_file *)obj;
    struct irp_call *irp, *next;

    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        list_remove( &irp->dev_entry );
        release_object( irp );  /* no longer on the device queue */
    }
    if (file->fd) release_object( file->fd );
    list_remove( &file->entry );
    release_object( file->device );
}
/* record the client-side file pointer for this device_file and patch it
 * into the per-major 'file' field of every irp already queued on the file */
static void set_file_user_ptr( struct device_file *file, client_ptr_t ptr )
{
    struct irp_call *irp;

    if (file->user_ptr == ptr) return;  /* nothing to do */

    file->user_ptr = ptr;

    /* update already queued irps */
    LIST_FOR_EACH_ENTRY( irp, &file->requests, struct irp_call, dev_entry )
    {
        switch (irp->params.major)
        {
        case IRP_MJ_CLOSE:          irp->params.close.file = ptr; break;
        case IRP_MJ_READ:           irp->params.read.file  = ptr; break;
        case IRP_MJ_WRITE:          irp->params.write.file = ptr; break;
        case IRP_MJ_FLUSH_BUFFERS:  irp->params.flush.file = ptr; break;
        case IRP_MJ_DEVICE_CONTROL: irp->params.ioctl.file = ptr; break;
        }
    }
}
/* queue an irp to the device */

/* attach the client's async to the irp and enqueue it; for a blocking call
 * also return a wait handle on the irp.  On failure both the handle and the
 * async setup are rolled back and 0 is returned.  On success the request is
 * left pending (STATUS_PENDING). */
static obj_handle_t queue_irp( struct device_file *file, struct irp_call *irp,
                               const async_data_t *async_data, int blocking )
{
    obj_handle_t handle = 0;

    if (blocking && !(handle = alloc_handle( current->process, irp, SYNCHRONIZE, 0 ))) return 0;

    if (!(irp->async = fd_queue_async( file->fd, async_data, irp->iosb, ASYNC_TYPE_WAIT )))
    {
        if (handle) close_handle( current->process, handle );  /* undo the wait handle */
        return 0;
    }
    irp->user_arg = async_data->arg;
    add_irp_to_queue( file, irp, current );
    set_error( STATUS_PENDING );
    return handle;
}
/* report every device file as FD_TYPE_DEVICE to the fd layer */
static enum server_fd_type device_file_get_fd_type( struct fd *fd )
{
    return FD_TYPE_DEVICE;
}
491 static obj_handle_t device_file_read( struct fd *fd, const async_data_t *async_data, int blocking,
492 file_pos_t pos )
494 struct device_file *file = get_fd_user( fd );
495 struct irp_call *irp;
496 obj_handle_t handle;
497 irp_params_t params;
499 memset( &params, 0, sizeof(params) );
500 params.read.major = IRP_MJ_READ;
501 params.read.key = 0;
502 params.read.pos = pos;
503 params.read.file = file->user_ptr;
505 irp = create_irp( file, &params, NULL, 0, get_reply_max_size() );
506 if (!irp) return 0;
508 handle = queue_irp( file, irp, async_data, blocking );
509 release_object( irp );
510 return handle;
513 static obj_handle_t device_file_write( struct fd *fd, const async_data_t *async_data, int blocking,
514 file_pos_t pos, data_size_t *written )
516 struct device_file *file = get_fd_user( fd );
517 struct irp_call *irp;
518 obj_handle_t handle;
519 irp_params_t params;
521 memset( &params, 0, sizeof(params) );
522 params.write.major = IRP_MJ_WRITE;
523 params.write.key = 0;
524 params.write.pos = pos;
525 params.write.file = file->user_ptr;
527 irp = create_irp( file, &params, get_req_data(), get_req_data_size(), 0 );
528 if (!irp) return 0;
530 handle = queue_irp( file, irp, async_data, blocking );
531 release_object( irp );
532 return handle;
535 static obj_handle_t device_file_flush( struct fd *fd, const async_data_t *async_data, int blocking )
537 struct device_file *file = get_fd_user( fd );
538 struct irp_call *irp;
539 obj_handle_t handle;
540 irp_params_t params;
542 memset( &params, 0, sizeof(params) );
543 params.flush.major = IRP_MJ_FLUSH_BUFFERS;
544 params.flush.file = file->user_ptr;
546 irp = create_irp( file, &params, NULL, 0, 0 );
547 if (!irp) return 0;
549 handle = queue_irp( file, irp, async_data, blocking );
550 release_object( irp );
551 return handle;
554 static obj_handle_t device_file_ioctl( struct fd *fd, ioctl_code_t code, const async_data_t *async_data,
555 int blocking )
557 struct device_file *file = get_fd_user( fd );
558 struct irp_call *irp;
559 obj_handle_t handle;
560 irp_params_t params;
562 memset( &params, 0, sizeof(params) );
563 params.ioctl.major = IRP_MJ_DEVICE_CONTROL;
564 params.ioctl.code = code;
565 params.ioctl.file = file->user_ptr;
567 irp = create_irp( file, &params, get_req_data(), get_req_data_size(),
568 get_reply_max_size() );
569 if (!irp) return 0;
571 handle = queue_irp( file, irp, async_data, blocking );
572 release_object( irp );
573 return handle;
/* create (or open, if it already exists) a named device attached to the
 * given manager; initialization is skipped for an existing object, which
 * create_named_object signals via STATUS_OBJECT_NAME_EXISTS */
static struct device *create_device( struct object *root, const struct unicode_str *name,
                                     struct device_manager *manager, unsigned int attr )
{
    struct device *device;

    if ((device = create_named_object( root, &device_ops, name, attr, NULL )))
    {
        if (get_error() != STATUS_OBJECT_NAME_EXISTS)
        {
            /* initialize it if it didn't already exist */
            device->unix_path = NULL;
            device->manager = manager;
            list_add_tail( &manager->devices, &device->entry );
            list_init( &device->files );
        }
    }
    return device;
}
595 struct object *create_unix_device( struct object *root, const struct unicode_str *name,
596 const char *unix_path )
598 struct device *device;
600 if ((device = create_named_object( root, &device_ops, name, 0, NULL )))
602 device->unix_path = strdup( unix_path );
603 device->manager = NULL; /* no manager, requests go straight to the Unix device */
604 list_init( &device->files );
606 return &device->obj;
/* terminate requests when the underlying device is deleted */

/* fail every irp still queued on the file with STATUS_FILE_DELETED;
 * each irp is unlinked from the manager queue here, and set_irp_result
 * unlinks it from the file queue */
static void delete_file( struct device_file *file )
{
    struct irp_call *irp, *next;

    /* terminate all pending requests */
    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        list_remove( &irp->mgr_entry );
        set_irp_result( irp, STATUS_FILE_DELETED, NULL, 0, 0 );
    }
}
/* detach a device from its manager: fail all pending requests on its open
 * files, remove it from the name space, and mark it deleted by clearing
 * 'manager'; idempotent */
static void delete_device( struct device *device )
{
    struct device_file *file, *next;

    if (!device->manager) return;  /* already deleted */

    LIST_FOR_EACH_ENTRY_SAFE( file, next, &device->files, struct device_file, entry )
        delete_file( file );

    unlink_named_object( &device->obj );
    list_remove( &device->entry );
    device->manager = NULL;
}
/* print a short description of a device_manager for server debugging output */
static void device_manager_dump( struct object *obj, int verbose )
{
    fputs( "Device manager\n", stderr );
}
643 static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry )
645 struct device_manager *manager = (struct device_manager *)obj;
647 return !list_empty( &manager->requests );
/* destructor for device_manager: delete every remaining device; uses a
 * while/list_head loop because delete_device unlinks the current entry */
static void device_manager_destroy( struct object *obj )
{
    struct device_manager *manager = (struct device_manager *)obj;
    struct list *ptr;

    while ((ptr = list_head( &manager->devices )))
    {
        struct device *device = LIST_ENTRY( ptr, struct device, entry );
        delete_device( device );
    }
}
662 static struct device_manager *create_device_manager(void)
664 struct device_manager *manager;
666 if ((manager = alloc_object( &device_manager_ops )))
668 list_init( &manager->devices );
669 list_init( &manager->requests );
671 return manager;
/* create a device manager */
DECL_HANDLER(create_device_manager)
{
    struct device_manager *manager = create_device_manager();

    if (manager)
    {
        /* the handle keeps the manager alive; drop our creation reference */
        reply->handle = alloc_handle( current->process, manager, req->access, req->attributes );
        release_object( manager );
    }
}
/* create a device */
DECL_HANDLER(create_device)
{
    struct device *device;
    struct unicode_str name = get_req_unicode_str();
    struct device_manager *manager;
    struct object *root = NULL;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    /* resolve the optional root directory for the device name */
    if (req->rootdir && !(root = get_directory_obj( current->process, req->rootdir )))
    {
        release_object( manager );
        return;
    }

    if ((device = create_device( root, &name, manager, req->attributes )))
    {
        device->user_ptr = req->user_ptr;
        reply->handle = alloc_handle( current->process, device, req->access, req->attributes );
        release_object( device );
    }

    if (root) release_object( root );
    release_object( manager );
}
/* delete a device */
DECL_HANDLER(delete_device)
{
    struct device *device;

    if ((device = (struct device *)get_handle_obj( current->process, req->handle, 0, &device_ops )))
    {
        delete_device( device );
        release_object( device );
    }
}
/* retrieve the next pending device irp request */
DECL_HANDLER(get_next_device_request)
{
    struct irp_call *irp;
    struct device_manager *manager;
    struct list *ptr;
    struct iosb *iosb;

    /* out-of-range major tells the client there is no request */
    reply->params.major = IRP_MJ_MAXIMUM_FUNCTION + 1;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    /* complete the previously returned irp, if any, in the same round-trip */
    if (req->prev)
    {
        if ((irp = (struct irp_call *)get_handle_obj( current->process, req->prev, 0, &irp_call_ops )))
        {
            set_irp_result( irp, req->status, NULL, 0, 0 );
            close_handle( current->process, req->prev );  /* avoid an extra round-trip for close */
            release_object( irp );
        }
        clear_error();  /* a bad prev handle must not fail the whole request */
    }

    if ((ptr = list_head( &manager->requests )))
    {
        irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );
        if (irp->thread)
        {
            reply->client_pid = get_process_id( irp->thread->process );
            reply->client_tid = get_thread_id( irp->thread );
        }
        reply->params = irp->params;
        iosb = irp->iosb;
        reply->in_size = iosb->in_size;
        reply->out_size = iosb->out_size;
        /* if the input doesn't fit the client must retry with a bigger buffer */
        if (iosb->in_size > get_reply_max_size()) set_error( STATUS_BUFFER_OVERFLOW );
        else if ((reply->next = alloc_handle( current->process, irp, 0, 0 )))
        {
            /* ownership of in_data transfers to the reply buffer */
            set_reply_data_ptr( iosb->in_data, iosb->in_size );
            iosb->in_data = NULL;
            iosb->in_size = 0;
            /* off the manager queue, but still on the file queue until completed */
            list_remove( &irp->mgr_entry );
            list_init( &irp->mgr_entry );
        }
    }
    else set_error( STATUS_PENDING );

    release_object( manager );
}
/* store results of an async irp */
DECL_HANDLER(set_irp_result)
{
    struct irp_call *irp;

    if ((irp = (struct irp_call *)get_handle_obj( current->process, req->handle, 0, &irp_call_ops )))
    {
        /* record the driver-side file pointer before completing the irp,
         * so queued irps on the same file get patched too */
        if (irp->file) set_file_user_ptr( irp->file, req->file_ptr );
        set_irp_result( irp, req->status, get_req_data(), get_req_data_size(), req->size );
        close_handle( current->process, req->handle );  /* avoid an extra round-trip for close */
        release_object( irp );
    }
}