/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include <asm/system.h>

#include "core.h"
/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */
#define FW_CDEV_KERNEL_VERSION			4
#define FW_CDEV_VERSION_EVENT_REQUEST2		4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END	4
struct client {
        u32 version;
        struct fw_device *device;

        spinlock_t lock;
        bool in_shutdown;
        struct idr resource_idr;
        struct list_head event_list;
        wait_queue_head_t wait;
        u64 bus_reset_closure;

        struct fw_iso_context *iso_context;
        u64 iso_closure;
        struct fw_iso_buffer buffer;
        unsigned long vm_start;

        struct list_head phy_receiver_link;
        u64 phy_receiver_closure;

        struct list_head link;
        struct kref kref;
};
static inline void client_get(struct client *client)
{
        kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
        struct client *client = container_of(kref, struct client, kref);

        fw_device_put(client->device);
        kfree(client);
}

static void client_put(struct client *client)
{
        kref_put(&client->kref, client_release);
}
struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
                                             struct client_resource *);
struct client_resource {
        client_resource_release_fn_t release;
        int handle;
};
struct address_handler_resource {
        struct client_resource resource;
        struct fw_address_handler handler;
        __u64 closure;
        struct client *client;
};

struct outbound_transaction_resource {
        struct client_resource resource;
        struct fw_transaction transaction;
};

struct inbound_transaction_resource {
        struct client_resource resource;
        struct fw_card *card;
        struct fw_request *request;
        void *data;
        size_t length;
};

struct descriptor_resource {
        struct client_resource resource;
        struct fw_descriptor descriptor;
        u32 data[0];
};
struct iso_resource {
        struct client_resource resource;
        struct client *client;
        /* Schedule work and access todo only with client->lock held. */
        struct delayed_work work;
        enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
              ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
        int generation;
        u64 channels;
        s32 bandwidth;
        __be32 transaction_data[2];
        struct iso_resource_event *e_alloc, *e_dealloc;
};
static void release_iso_resource(struct client *, struct client_resource *);

static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
        client_get(r->client);
        if (!schedule_delayed_work(&r->work, delay))
                client_put(r->client);
}

static void schedule_if_iso_resource(struct client_resource *resource)
{
        if (resource->release == release_iso_resource)
                schedule_iso_resource(container_of(resource,
                                        struct iso_resource, resource), 0);
}
/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
        struct { void *data; size_t size; } v[2];
        struct list_head link;
};

struct bus_reset_event {
        struct event event;
        struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
        struct event event;
        struct client *client;
        struct outbound_transaction_resource r;
        struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
        struct event event;
        union {
                struct fw_cdev_event_request request;
                struct fw_cdev_event_request2 request2;
        } req;
};

struct iso_interrupt_event {
        struct event event;
        struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_resource_event {
        struct event event;
        struct fw_cdev_event_iso_resource iso_resource;
};

struct outbound_phy_packet_event {
        struct event event;
        struct client *client;
        struct fw_packet p;
        struct fw_cdev_event_phy_packet phy_packet;
};

struct inbound_phy_packet_event {
        struct event event;
        struct fw_cdev_event_phy_packet phy_packet;
};
static inline void __user *u64_to_uptr(__u64 value)
{
        return (void __user *)(unsigned long)value;
}

static inline __u64 uptr_to_u64(void __user *ptr)
{
        return (__u64)(unsigned long)ptr;
}
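
/*
 * The character-device ABI carries user-space pointers as __u64 so that the
 * ioctl structures have one layout regardless of whether user space is
 * 32-bit or 64-bit; the round trip through "unsigned long" above narrows or
 * widens to the kernel's pointer size (the compat path further below feeds
 * compat_ptr() through the same helpers).
 */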
static int fw_device_op_open(struct inode *inode, struct file *file)
{
        struct fw_device *device;
        struct client *client;

        device = fw_device_get_by_devt(inode->i_rdev);
        if (device == NULL)
                return -ENODEV;

        if (fw_device_is_shutdown(device)) {
                fw_device_put(device);
                return -ENODEV;
        }

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (client == NULL) {
                fw_device_put(device);
                return -ENOMEM;
        }

        client->device = device;
        spin_lock_init(&client->lock);
        idr_init(&client->resource_idr);
        INIT_LIST_HEAD(&client->event_list);
        init_waitqueue_head(&client->wait);
        INIT_LIST_HEAD(&client->phy_receiver_link);
        kref_init(&client->kref);

        file->private_data = client;

        mutex_lock(&device->client_list_mutex);
        list_add_tail(&client->link, &device->client_list);
        mutex_unlock(&device->client_list_mutex);

        return nonseekable_open(inode, file);
}
static void queue_event(struct client *client, struct event *event,
                        void *data0, size_t size0, void *data1, size_t size1)
{
        unsigned long flags;

        event->v[0].data = data0;
        event->v[0].size = size0;
        event->v[1].data = data1;
        event->v[1].size = size1;

        spin_lock_irqsave(&client->lock, flags);
        if (client->in_shutdown)
                kfree(event);
        else
                list_add_tail(&event->link, &client->event_list);
        spin_unlock_irqrestore(&client->lock, flags);

        wake_up_interruptible(&client->wait);
}
static int dequeue_event(struct client *client,
                         char __user *buffer, size_t count)
{
        struct event *event;
        size_t size, total;
        int i, ret;

        ret = wait_event_interruptible(client->wait,
                        !list_empty(&client->event_list) ||
                        fw_device_is_shutdown(client->device));
        if (ret < 0)
                return ret;

        if (list_empty(&client->event_list) &&
                       fw_device_is_shutdown(client->device))
                return -ENODEV;

        spin_lock_irq(&client->lock);
        event = list_first_entry(&client->event_list, struct event, link);
        list_del(&event->link);
        spin_unlock_irq(&client->lock);

        total = 0;
        for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
                size = min(event->v[i].size, count - total);
                if (copy_to_user(buffer + total, event->v[i].data, size)) {
                        ret = -EFAULT;
                        goto out;
                }
                total += size;
        }
        ret = total;

 out:
        kfree(event);

        return ret;
}
static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
                                 size_t count, loff_t *offset)
{
        struct client *client = file->private_data;

        return dequeue_event(client, buffer, count);
}
static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
                                 struct client *client)
{
        struct fw_card *card = client->device->card;

        spin_lock_irq(&card->lock);

        event->closure       = client->bus_reset_closure;
        event->type          = FW_CDEV_EVENT_BUS_RESET;
        event->generation    = client->device->generation;
        event->node_id       = client->device->node_id;
        event->local_node_id = card->local_node->node_id;
        event->bm_node_id    = card->bm_node_id;
        event->irm_node_id   = card->irm_node->node_id;
        event->root_node_id  = card->root_node->node_id;

        spin_unlock_irq(&card->lock);
}
static void for_each_client(struct fw_device *device,
                            void (*callback)(struct client *client))
{
        struct client *c;

        mutex_lock(&device->client_list_mutex);
        list_for_each_entry(c, &device->client_list, link)
                callback(c);
        mutex_unlock(&device->client_list_mutex);
}

static int schedule_reallocations(int id, void *p, void *data)
{
        schedule_if_iso_resource(p);

        return 0;
}
static void queue_bus_reset_event(struct client *client)
{
        struct bus_reset_event *e;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e == NULL) {
                fw_notify("Out of memory when allocating event\n");
                return;
        }

        fill_bus_reset_event(&e->reset, client);

        queue_event(client, &e->event,
                    &e->reset, sizeof(e->reset), NULL, 0);

        spin_lock_irq(&client->lock);
        idr_for_each(&client->resource_idr, schedule_reallocations, client);
        spin_unlock_irq(&client->lock);
}
void fw_device_cdev_update(struct fw_device *device)
{
        for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
        wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
        for_each_client(device, wake_up_client);
}
union ioctl_arg {
        struct fw_cdev_get_info			get_info;
        struct fw_cdev_send_request		send_request;
        struct fw_cdev_allocate			allocate;
        struct fw_cdev_deallocate		deallocate;
        struct fw_cdev_send_response		send_response;
        struct fw_cdev_initiate_bus_reset	initiate_bus_reset;
        struct fw_cdev_add_descriptor		add_descriptor;
        struct fw_cdev_remove_descriptor	remove_descriptor;
        struct fw_cdev_create_iso_context	create_iso_context;
        struct fw_cdev_queue_iso		queue_iso;
        struct fw_cdev_start_iso		start_iso;
        struct fw_cdev_stop_iso			stop_iso;
        struct fw_cdev_get_cycle_timer		get_cycle_timer;
        struct fw_cdev_allocate_iso_resource	allocate_iso_resource;
        struct fw_cdev_send_stream_packet	send_stream_packet;
        struct fw_cdev_get_cycle_timer2		get_cycle_timer2;
        struct fw_cdev_send_phy_packet		send_phy_packet;
        struct fw_cdev_receive_phy_packets	receive_phy_packets;
};
static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_info *a = &arg->get_info;
        struct fw_cdev_event_bus_reset bus_reset;
        unsigned long ret = 0;

        client->version = a->version;
        a->version = FW_CDEV_KERNEL_VERSION;
        a->card = client->device->card->index;

        down_read(&fw_device_rwsem);

        if (a->rom != 0) {
                size_t want = a->rom_length;
                size_t have = client->device->config_rom_length * 4;

                ret = copy_to_user(u64_to_uptr(a->rom),
                                   client->device->config_rom, min(want, have));
        }
        a->rom_length = client->device->config_rom_length * 4;

        up_read(&fw_device_rwsem);

        if (ret != 0)
                return -EFAULT;

        client->bus_reset_closure = a->bus_reset_closure;
        if (a->bus_reset != 0) {
                fill_bus_reset_event(&bus_reset, client);
                if (copy_to_user(u64_to_uptr(a->bus_reset),
                                 &bus_reset, sizeof(bus_reset)))
                        return -EFAULT;
        }

        return 0;
}
static int add_client_resource(struct client *client,
                               struct client_resource *resource, gfp_t gfp_mask)
{
        unsigned long flags;
        int ret;

 retry:
        if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
                return -ENOMEM;

        spin_lock_irqsave(&client->lock, flags);
        if (client->in_shutdown)
                ret = -ECANCELED;
        else
                ret = idr_get_new(&client->resource_idr, resource,
                                  &resource->handle);
        if (ret >= 0) {
                client_get(client);
                schedule_if_iso_resource(resource);
        }
        spin_unlock_irqrestore(&client->lock, flags);

        if (ret == -EAGAIN)
                goto retry;

        return ret < 0 ? ret : 0;
}
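
/*
 * Note on the retry loop above: idr_pre_get() only preloads free-list
 * memory, and a concurrent allocator may consume that preload before
 * idr_get_new() runs, in which case idr_get_new() returns -EAGAIN and
 * the caller is expected to preload again and retry.
 */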
static int release_client_resource(struct client *client, u32 handle,
                                   client_resource_release_fn_t release,
                                   struct client_resource **return_resource)
{
        struct client_resource *resource;

        spin_lock_irq(&client->lock);
        if (client->in_shutdown)
                resource = NULL;
        else
                resource = idr_find(&client->resource_idr, handle);
        if (resource && resource->release == release)
                idr_remove(&client->resource_idr, handle);
        spin_unlock_irq(&client->lock);

        if (!(resource && resource->release == release))
                return -EINVAL;

        if (return_resource)
                *return_resource = resource;
        else
                resource->release(client, resource);

        client_put(client);

        return 0;
}
static void release_transaction(struct client *client,
                                struct client_resource *resource)
{
        struct outbound_transaction_resource *r = container_of(resource,
                        struct outbound_transaction_resource, resource);

        fw_cancel_transaction(client->device->card, &r->transaction);
}
static void complete_transaction(struct fw_card *card, int rcode,
                                 void *payload, size_t length, void *data)
{
        struct outbound_transaction_event *e = data;
        struct fw_cdev_event_response *rsp = &e->response;
        struct client *client = e->client;
        unsigned long flags;

        if (length < rsp->length)
                rsp->length = length;
        if (rcode == RCODE_COMPLETE)
                memcpy(rsp->data, payload, rsp->length);

        spin_lock_irqsave(&client->lock, flags);
        /*
         * 1. If called while in shutdown, the idr tree must be left untouched.
         *    The idr handle will be removed and the client reference will be
         *    dropped later.
         * 2. If the call chain was release_client_resource ->
         *    release_transaction -> complete_transaction (instead of a normal
         *    conclusion of the transaction), i.e. if this resource was already
         *    unregistered from the idr, the client reference will be dropped
         *    by release_client_resource and we must not drop it here.
         */
        if (!client->in_shutdown &&
            idr_find(&client->resource_idr, e->r.resource.handle)) {
                idr_remove(&client->resource_idr, e->r.resource.handle);
                /* Drop the idr's reference */
                client_put(client);
        }
        spin_unlock_irqrestore(&client->lock, flags);

        rsp->type = FW_CDEV_EVENT_RESPONSE;
        rsp->rcode = rcode;

        /*
         * In the case that sizeof(*rsp) doesn't align with the position of the
         * data, and the read is short, preserve an extra copy of the data
         * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
         * for short reads and some apps depended on it, this is both safe
         * and prudent for compatibility.
         */
        if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
                queue_event(client, &e->event, rsp, sizeof(*rsp),
                            rsp->data, rsp->length);
        else
                queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
                            NULL, 0);

        /* Drop the transaction callback's reference */
        client_put(client);
}
static int init_request(struct client *client,
                        struct fw_cdev_send_request *request,
                        int destination_id, int speed)
{
        struct outbound_transaction_event *e;
        int ret;

        if (request->tcode != TCODE_STREAM_DATA &&
            (request->length > 4096 || request->length > 512 << speed))
                return -EIO;

        if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
            request->length < 4)
                return -EINVAL;

        e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        e->client = client;
        e->response.length = request->length;
        e->response.closure = request->closure;

        if (request->data &&
            copy_from_user(e->response.data,
                           u64_to_uptr(request->data), request->length)) {
                ret = -EFAULT;
                goto failed;
        }

        e->r.resource.release = release_transaction;
        ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
        if (ret < 0)
                goto failed;

        /* Get a reference for the transaction callback */
        client_get(client);

        fw_send_request(client->device->card, &e->r.transaction,
                        request->tcode, destination_id, request->generation,
                        speed, request->offset, e->response.data,
                        request->length, complete_transaction, e);
        return 0;

 failed:
        kfree(e);

        return ret;
}
static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
        switch (arg->send_request.tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_WRITE_BLOCK_REQUEST:
        case TCODE_READ_QUADLET_REQUEST:
        case TCODE_READ_BLOCK_REQUEST:
        case TCODE_LOCK_MASK_SWAP:
        case TCODE_LOCK_COMPARE_SWAP:
        case TCODE_LOCK_FETCH_ADD:
        case TCODE_LOCK_LITTLE_ADD:
        case TCODE_LOCK_BOUNDED_ADD:
        case TCODE_LOCK_WRAP_ADD:
        case TCODE_LOCK_VENDOR_DEPENDENT:
                break;
        default:
                return -EINVAL;
        }

        return init_request(client, &arg->send_request, client->device->node_id,
                            client->device->max_speed);
}
static inline bool is_fcp_request(struct fw_request *request)
{
        return request == NULL;
}
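
/*
 * Background, for readers new to this convention: core-transaction.c
 * delivers write requests to the FCP command/response registers to all
 * registered handlers and completes the response subaction itself, so
 * such listeners are handed a NULL fw_request and must not respond.
 */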
static void release_request(struct client *client,
                            struct client_resource *resource)
{
        struct inbound_transaction_resource *r = container_of(resource,
                        struct inbound_transaction_resource, resource);

        if (is_fcp_request(r->request))
                kfree(r->data);
        else
                fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

        fw_card_put(r->card);
        kfree(r);
}
static void handle_request(struct fw_card *card, struct fw_request *request,
                           int tcode, int destination, int source,
                           int generation, unsigned long long offset,
                           void *payload, size_t length, void *callback_data)
{
        struct address_handler_resource *handler = callback_data;
        struct inbound_transaction_resource *r;
        struct inbound_transaction_event *e;
        size_t event_size0;
        void *fcp_frame = NULL;
        int ret;

        /* card may be different from handler->client->device->card */
        fw_card_get(card);

        r = kmalloc(sizeof(*r), GFP_ATOMIC);
        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (r == NULL || e == NULL) {
                fw_notify("Out of memory when allocating event\n");
                goto failed;
        }
        r->card    = card;
        r->request = request;
        r->data    = payload;
        r->length  = length;

        if (is_fcp_request(request)) {
                /*
                 * FIXME: Let core-transaction.c manage a
                 * single reference-counted copy?
                 */
                fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
                if (fcp_frame == NULL)
                        goto failed;

                r->data = fcp_frame;
        }

        r->resource.release = release_request;
        ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
        if (ret < 0)
                goto failed;

        if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
                struct fw_cdev_event_request *req = &e->req.request;

                if (tcode & 0x10)
                        tcode = TCODE_LOCK_REQUEST;

                req->type	= FW_CDEV_EVENT_REQUEST;
                req->tcode	= tcode;
                req->offset	= offset;
                req->length	= length;
                req->handle	= r->resource.handle;
                req->closure	= handler->closure;
                event_size0	= sizeof(*req);
        } else {
                struct fw_cdev_event_request2 *req = &e->req.request2;

                req->type	= FW_CDEV_EVENT_REQUEST2;
                req->tcode	= tcode;
                req->offset	= offset;
                req->source_node_id = source;
                req->destination_node_id = destination;
                req->card	= card->index;
                req->generation	= generation;
                req->length	= length;
                req->handle	= r->resource.handle;
                req->closure	= handler->closure;
                event_size0	= sizeof(*req);
        }

        queue_event(handler->client, &e->event,
                    &e->req, event_size0, r->data, length);
        return;

 failed:
        kfree(r);
        kfree(e);
        kfree(fcp_frame);

        if (!is_fcp_request(request))
                fw_send_response(card, request, RCODE_CONFLICT_ERROR);

        fw_card_put(card);
}
static void release_address_handler(struct client *client,
                                    struct client_resource *resource)
{
        struct address_handler_resource *r =
            container_of(resource, struct address_handler_resource, resource);

        fw_core_remove_address_handler(&r->handler);
        kfree(r);
}
static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_allocate *a = &arg->allocate;
        struct address_handler_resource *r;
        struct fw_address_region region;
        int ret;

        r = kmalloc(sizeof(*r), GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        region.start = a->offset;
        if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
                region.end = a->offset + a->length;
        else
                region.end = a->region_end;

        r->handler.length           = a->length;
        r->handler.address_callback = handle_request;
        r->handler.callback_data    = r;
        r->closure   = a->closure;
        r->client    = client;

        ret = fw_core_add_address_handler(&r->handler, &region);
        if (ret < 0) {
                kfree(r);
                return ret;
        }
        a->offset = r->handler.offset;

        r->resource.release = release_address_handler;
        ret = add_client_resource(client, &r->resource, GFP_KERNEL);
        if (ret < 0) {
                release_address_handler(client, &r->resource);
                return ret;
        }
        a->handle = r->resource.handle;

        return 0;
}
static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
        return release_client_resource(client, arg->deallocate.handle,
                                       release_address_handler, NULL);
}
static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_response *a = &arg->send_response;
        struct client_resource *resource;
        struct inbound_transaction_resource *r;
        int ret = 0;

        if (release_client_resource(client, a->handle,
                                    release_request, &resource) < 0)
                return -EINVAL;

        r = container_of(resource, struct inbound_transaction_resource,
                         resource);
        if (is_fcp_request(r->request))
                goto out;

        if (a->length != fw_get_response_length(r->request)) {
                ret = -EINVAL;
                kfree(r->request);
                goto out;
        }
        if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
                ret = -EFAULT;
                kfree(r->request);
                goto out;
        }
        fw_send_response(r->card, r->request, a->rcode);
 out:
        fw_card_put(r->card);
        kfree(r);

        return ret;
}
static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
        fw_schedule_bus_reset(client->device->card, true,
                        arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
        return 0;
}
static void release_descriptor(struct client *client,
                               struct client_resource *resource)
{
        struct descriptor_resource *r =
                container_of(resource, struct descriptor_resource, resource);

        fw_core_remove_descriptor(&r->descriptor);
        kfree(r);
}
static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
        struct descriptor_resource *r;
        int ret;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        if (a->length > 256)
                return -EINVAL;

        r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
                ret = -EFAULT;
                goto failed;
        }

        r->descriptor.length    = a->length;
        r->descriptor.immediate = a->immediate;
        r->descriptor.key       = a->key;
        r->descriptor.data      = r->data;

        ret = fw_core_add_descriptor(&r->descriptor);
        if (ret < 0)
                goto failed;

        r->resource.release = release_descriptor;
        ret = add_client_resource(client, &r->resource, GFP_KERNEL);
        if (ret < 0) {
                fw_core_remove_descriptor(&r->descriptor);
                goto failed;
        }
        a->handle = r->resource.handle;

        return 0;
 failed:
        kfree(r);

        return ret;
}
static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
        return release_client_resource(client, arg->remove_descriptor.handle,
                                       release_descriptor, NULL);
}
static void iso_callback(struct fw_iso_context *context, u32 cycle,
                         size_t header_length, void *header, void *data)
{
        struct client *client = data;
        struct iso_interrupt_event *e;

        e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
        if (e == NULL) {
                fw_notify("Out of memory when allocating event\n");
                return;
        }
        e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
        e->interrupt.closure   = client->iso_closure;
        e->interrupt.cycle     = cycle;
        e->interrupt.header_length = header_length;
        memcpy(e->interrupt.header, header, header_length);
        queue_event(client, &e->event, &e->interrupt,
                    sizeof(e->interrupt) + header_length, NULL, 0);
}
static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
        struct fw_iso_context *context;

        BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
                     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE);

        if (a->channel > 63)
                return -EINVAL;

        switch (a->type) {
        case FW_ISO_CONTEXT_RECEIVE:
                if (a->header_size < 4 || (a->header_size & 3))
                        return -EINVAL;
                break;

        case FW_ISO_CONTEXT_TRANSMIT:
                if (a->speed > SCODE_3200)
                        return -EINVAL;
                break;

        default:
                return -EINVAL;
        }

        context = fw_iso_context_create(client->device->card, a->type,
                                        a->channel, a->speed, a->header_size,
                                        iso_callback, client);
        if (IS_ERR(context))
                return PTR_ERR(context);

        /* We only support one context at this time. */
        spin_lock_irq(&client->lock);
        if (client->iso_context != NULL) {
                spin_unlock_irq(&client->lock);
                fw_iso_context_destroy(context);
                return -EBUSY;
        }
        client->iso_closure = a->closure;
        client->iso_context = context;
        spin_unlock_irq(&client->lock);

        a->handle = 0;

        return 0;
}
/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
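
/*
 * For illustration, user space composes this control word with the matching
 * FW_CDEV_ISO_* encode macros from <linux/firewire-cdev.h>.  E.g. a transmit
 * packet with an 8-byte header, tag 1, an interrupt on completion, and a
 * 488-byte payload would be queued with
 *
 *	control = FW_CDEV_ISO_HEADER_LENGTH(8) | FW_CDEV_ISO_TAG(1) |
 *		  FW_CDEV_ISO_INTERRUPT | FW_CDEV_ISO_PAYLOAD_LENGTH(488);
 *
 * which the GET_*() macros above take apart again.
 */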
static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_queue_iso *a = &arg->queue_iso;
        struct fw_cdev_iso_packet __user *p, *end, *next;
        struct fw_iso_context *ctx = client->iso_context;
        unsigned long payload, buffer_end, header_length;
        u32 control;
        int count;
        struct {
                struct fw_iso_packet packet;
                u8 header[256];
        } u;

        if (ctx == NULL || a->handle != 0)
                return -EINVAL;

        /*
         * If the user passes a non-NULL data pointer, has mmap()'ed
         * the iso buffer, and the pointer points inside the buffer,
         * we setup the payload pointers accordingly.  Otherwise we
         * set them both to 0, which will still let packets with
         * payload_length == 0 through.  In other words, if no packets
         * use the indirect payload, the iso buffer need not be mapped
         * and the a->data pointer is ignored.
         */
        payload = (unsigned long)a->data - client->vm_start;
        buffer_end = client->buffer.page_count << PAGE_SHIFT;
        if (a->data == 0 || client->buffer.pages == NULL ||
            payload >= buffer_end) {
                payload = 0;
                buffer_end = 0;
        }

        p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);

        if (!access_ok(VERIFY_READ, p, a->size))
                return -EFAULT;

        end = (void __user *)p + a->size;
        count = 0;
        while (p < end) {
                if (get_user(control, &p->control))
                        return -EFAULT;
                u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
                u.packet.interrupt = GET_INTERRUPT(control);
                u.packet.skip = GET_SKIP(control);
                u.packet.tag = GET_TAG(control);
                u.packet.sy = GET_SY(control);
                u.packet.header_length = GET_HEADER_LENGTH(control);

                if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
                        if (u.packet.header_length % 4 != 0)
                                return -EINVAL;
                        header_length = u.packet.header_length;
                } else {
                        /*
                         * We require that header_length is a multiple of
                         * the fixed header size, ctx->header_size.
                         */
                        if (u.packet.header_length == 0 ||
                            u.packet.header_length % ctx->header_size != 0)
                                return -EINVAL;
                        header_length = 0;
                }

                next = (struct fw_cdev_iso_packet __user *)
                        &p->header[header_length / 4];
                if (next > end)
                        return -EINVAL;
                if (__copy_from_user
                    (u.packet.header, p->header, header_length))
                        return -EFAULT;
                if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
                    u.packet.header_length + u.packet.payload_length > 0)
                        return -EINVAL;
                if (payload + u.packet.payload_length > buffer_end)
                        return -EINVAL;

                if (fw_iso_context_queue(ctx, &u.packet,
                                         &client->buffer, payload))
                        break;

                p = next;
                payload += u.packet.payload_length;
                count++;
        }

        a->size    -= uptr_to_u64(p) - a->packets;
        a->packets  = uptr_to_u64(p);
        a->data     = client->vm_start + payload;

        return count;
}
static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_start_iso *a = &arg->start_iso;

        BUILD_BUG_ON(
            FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
            FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);

        if (client->iso_context == NULL || a->handle != 0)
                return -EINVAL;

        if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
            (a->tags == 0 || a->tags > 15 || a->sync > 15))
                return -EINVAL;

        return fw_iso_context_start(client->iso_context,
                                    a->cycle, a->sync, a->tags);
}
static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_stop_iso *a = &arg->stop_iso;

        if (client->iso_context == NULL || a->handle != 0)
                return -EINVAL;

        return fw_iso_context_stop(client->iso_context);
}
static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
        struct fw_card *card = client->device->card;
        struct timespec ts = {0, 0};
        u32 cycle_time;
        int ret = 0;

        local_irq_disable();

        cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);

        switch (a->clk_id) {
        case CLOCK_REALTIME:      getnstimeofday(&ts);                   break;
        case CLOCK_MONOTONIC:     do_posix_clock_monotonic_gettime(&ts); break;
        case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts);                  break;
        default:
                ret = -EINVAL;
        }

        local_irq_enable();

        a->tv_sec      = ts.tv_sec;
        a->tv_nsec     = ts.tv_nsec;
        a->cycle_timer = cycle_time;

        return ret;
}
static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
        struct fw_cdev_get_cycle_timer2 ct2;

        ct2.clk_id = CLOCK_REALTIME;
        ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

        a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
        a->cycle_timer = ct2.cycle_timer;

        return 0;
}
static void iso_resource_work(struct work_struct *work)
{
        struct iso_resource_event *e;
        struct iso_resource *r =
                        container_of(work, struct iso_resource, work.work);
        struct client *client = r->client;
        int generation, channel, bandwidth, todo;
        bool skip, free, success;

        spin_lock_irq(&client->lock);
        generation = client->device->generation;
        todo = r->todo;
        /* Allow 1000ms grace period for other reallocations. */
        if (todo == ISO_RES_ALLOC &&
            time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
                schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
                skip = true;
        } else {
                /* We could be called twice within the same generation. */
                skip = todo == ISO_RES_REALLOC &&
                       r->generation == generation;
        }
        free = todo == ISO_RES_DEALLOC ||
               todo == ISO_RES_ALLOC_ONCE ||
               todo == ISO_RES_DEALLOC_ONCE;
        r->generation = generation;
        spin_unlock_irq(&client->lock);

        if (skip)
                goto out;

        bandwidth = r->bandwidth;

        fw_iso_resource_manage(client->device->card, generation,
                        r->channels, &channel, &bandwidth,
                        todo == ISO_RES_ALLOC ||
                        todo == ISO_RES_REALLOC ||
                        todo == ISO_RES_ALLOC_ONCE,
                        r->transaction_data);
        /*
         * Is this generation outdated already?  As long as this resource sticks
         * in the idr, it will be scheduled again for a newer generation or at
         * shutdown.
         */
        if (channel == -EAGAIN &&
            (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
                goto out;

        success = channel >= 0 || bandwidth > 0;

        spin_lock_irq(&client->lock);
        /*
         * Transit from allocation to reallocation, except if the client
         * requested deallocation in the meantime.
         */
        if (r->todo == ISO_RES_ALLOC)
                r->todo = ISO_RES_REALLOC;
        /*
         * Allocation or reallocation failure?  Pull this resource out of the
         * idr and prepare for deletion, unless the client is shutting down.
         */
        if (r->todo == ISO_RES_REALLOC && !success &&
            !client->in_shutdown &&
            idr_find(&client->resource_idr, r->resource.handle)) {
                idr_remove(&client->resource_idr, r->resource.handle);
                client_put(client);
                free = true;
        }
        spin_unlock_irq(&client->lock);

        if (todo == ISO_RES_ALLOC && channel >= 0)
                r->channels = 1ULL << channel;

        if (todo == ISO_RES_REALLOC && success)
                goto out;

        if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
                e = r->e_alloc;
                r->e_alloc = NULL;
        } else {
                e = r->e_dealloc;
                r->e_dealloc = NULL;
        }
        e->iso_resource.handle    = r->resource.handle;
        e->iso_resource.channel   = channel;
        e->iso_resource.bandwidth = bandwidth;

        queue_event(client, &e->event,
                    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

        if (free) {
                cancel_delayed_work(&r->work);
                kfree(r->e_alloc);
                kfree(r->e_dealloc);
                kfree(r);
        }
 out:
        client_put(client);
}
static void release_iso_resource(struct client *client,
                                 struct client_resource *resource)
{
        struct iso_resource *r =
                container_of(resource, struct iso_resource, resource);

        spin_lock_irq(&client->lock);
        r->todo = ISO_RES_DEALLOC;
        schedule_iso_resource(r, 0);
        spin_unlock_irq(&client->lock);
}
static int init_iso_resource(struct client *client,
                struct fw_cdev_allocate_iso_resource *request, int todo)
{
        struct iso_resource_event *e1, *e2;
        struct iso_resource *r;
        int ret;

        if ((request->channels == 0 && request->bandwidth == 0) ||
            request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
            request->bandwidth < 0)
                return -EINVAL;

        r  = kmalloc(sizeof(*r), GFP_KERNEL);
        e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
        e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
        if (r == NULL || e1 == NULL || e2 == NULL) {
                ret = -ENOMEM;
                goto fail;
        }

        INIT_DELAYED_WORK(&r->work, iso_resource_work);
        r->client	= client;
        r->todo		= todo;
        r->generation	= -1;
        r->channels	= request->channels;
        r->bandwidth	= request->bandwidth;
        r->e_alloc	= e1;
        r->e_dealloc	= e2;

        e1->iso_resource.closure = request->closure;
        e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
        e2->iso_resource.closure = request->closure;
        e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

        if (todo == ISO_RES_ALLOC) {
                r->resource.release = release_iso_resource;
                ret = add_client_resource(client, &r->resource, GFP_KERNEL);
                if (ret < 0)
                        goto fail;
        } else {
                r->resource.release = NULL;
                r->resource.handle = -1;
                schedule_iso_resource(r, 0);
        }
        request->handle = r->resource.handle;

        return 0;
 fail:
        kfree(r);
        kfree(e1);
        kfree(e2);

        return ret;
}
static int ioctl_allocate_iso_resource(struct client *client,
                                       union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client,
                                         union ioctl_arg *arg)
{
        return release_client_resource(client,
                        arg->deallocate.handle, release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client,
                                            union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client,
                                              union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}
/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
        return client->device->max_speed;
}
static int ioctl_send_broadcast_request(struct client *client,
                                        union ioctl_arg *arg)
{
        struct fw_cdev_send_request *a = &arg->send_request;

        switch (a->tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_WRITE_BLOCK_REQUEST:
                break;
        default:
                return -EINVAL;
        }

        /* Security policy: Only allow accesses to Units Space. */
        if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
                return -EACCES;

        return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}
static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
        struct fw_cdev_send_request request;
        int dest;

        if (a->speed > client->device->card->link_speed ||
            a->length > 1024 << a->speed)
                return -EIO;

        if (a->tag > 3 || a->channel > 63 || a->sy > 15)
                return -EINVAL;

        dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
        request.tcode		= TCODE_STREAM_DATA;
        request.length		= a->length;
        request.closure		= a->closure;
        request.data		= a->data;
        request.generation	= a->generation;

        return init_request(client, &request, dest, a->speed);
}
static void outbound_phy_packet_callback(struct fw_packet *packet,
                                         struct fw_card *card, int status)
{
        struct outbound_phy_packet_event *e =
                container_of(packet, struct outbound_phy_packet_event, p);

        switch (status) {
        /* expected: */
        case ACK_COMPLETE:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
        /* should never happen with PHY packets: */
        case ACK_PENDING:	e->phy_packet.rcode = RCODE_COMPLETE;	break;
        case ACK_BUSY_X:
        case ACK_BUSY_A:
        case ACK_BUSY_B:	e->phy_packet.rcode = RCODE_BUSY;	break;
        case ACK_DATA_ERROR:	e->phy_packet.rcode = RCODE_DATA_ERROR;	break;
        case ACK_TYPE_ERROR:	e->phy_packet.rcode = RCODE_TYPE_ERROR;	break;
        /* stale generation; cancelled; on certain controllers: no ack */
        default:		e->phy_packet.rcode = status;		break;
        }
        e->phy_packet.data[0] = packet->timestamp;

        queue_event(e->client, &e->event, &e->phy_packet,
                    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
        client_put(e->client);
}
static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
        struct fw_card *card = client->device->card;
        struct outbound_phy_packet_event *e;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        client_get(client);
        e->client		= client;
        e->p.speed		= SCODE_100;
        e->p.generation		= a->generation;
        e->p.header[0]		= a->data[0];
        e->p.header[1]		= a->data[1];
        e->p.header_length	= 8;
        e->p.callback		= outbound_phy_packet_callback;
        e->phy_packet.closure	= a->closure;
        e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_SENT;
        if (is_ping_packet(a->data))
                        e->phy_packet.length = 4;

        card->driver->send_request(card, &e->p);

        return 0;
}
static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
        struct fw_card *card = client->device->card;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        spin_lock_irq(&card->lock);

        list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
        client->phy_receiver_closure = a->closure;

        spin_unlock_irq(&card->lock);

        return 0;
}
void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
        struct client *client;
        struct inbound_phy_packet_event *e;
        unsigned long flags;

        spin_lock_irqsave(&card->lock, flags);

        list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
                e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
                if (e == NULL) {
                        fw_notify("Out of memory when allocating event\n");
                        break;
                }
                e->phy_packet.closure	= client->phy_receiver_closure;
                e->phy_packet.type	= FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
                e->phy_packet.rcode	= RCODE_COMPLETE;
                e->phy_packet.length	= 8;
                e->phy_packet.data[0]	= p->header[1];
                e->phy_packet.data[1]	= p->header[2];
                queue_event(client, &e->event,
                            &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
        }

        spin_unlock_irqrestore(&card->lock, flags);
}
static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
        [0x00] = ioctl_get_info,
        [0x01] = ioctl_send_request,
        [0x02] = ioctl_allocate,
        [0x03] = ioctl_deallocate,
        [0x04] = ioctl_send_response,
        [0x05] = ioctl_initiate_bus_reset,
        [0x06] = ioctl_add_descriptor,
        [0x07] = ioctl_remove_descriptor,
        [0x08] = ioctl_create_iso_context,
        [0x09] = ioctl_queue_iso,
        [0x0a] = ioctl_start_iso,
        [0x0b] = ioctl_stop_iso,
        [0x0c] = ioctl_get_cycle_timer,
        [0x0d] = ioctl_allocate_iso_resource,
        [0x0e] = ioctl_deallocate_iso_resource,
        [0x0f] = ioctl_allocate_iso_resource_once,
        [0x10] = ioctl_deallocate_iso_resource_once,
        [0x11] = ioctl_get_speed,
        [0x12] = ioctl_send_broadcast_request,
        [0x13] = ioctl_send_stream_packet,
        [0x14] = ioctl_get_cycle_timer2,
        [0x15] = ioctl_send_phy_packet,
        [0x16] = ioctl_receive_phy_packets,
};
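
/*
 * The table index is _IOC_NR() of the command number, i.e. the 0x00...0x16
 * ordinals of the FW_CDEV_IOC_* definitions in linux/firewire-cdev.h;
 * dispatch_ioctl() below relies on this and on all commands sharing the
 * ioctl type '#'.
 */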
static int dispatch_ioctl(struct client *client,
                          unsigned int cmd, void __user *arg)
{
        union ioctl_arg buffer;
        int ret;

        if (fw_device_is_shutdown(client->device))
                return -ENODEV;

        if (_IOC_TYPE(cmd) != '#' ||
            _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
            _IOC_SIZE(cmd) > sizeof(buffer))
                return -EINVAL;

        if (_IOC_DIR(cmd) == _IOC_READ)
                memset(&buffer, 0, _IOC_SIZE(cmd));

        if (_IOC_DIR(cmd) & _IOC_WRITE)
                if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
                        return -EFAULT;

        ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
        if (ret < 0)
                return ret;

        if (_IOC_DIR(cmd) & _IOC_READ)
                if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
                        return -EFAULT;

        return ret;
}
static long fw_device_op_ioctl(struct file *file,
                               unsigned int cmd, unsigned long arg)
{
        return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
                                      unsigned int cmd, unsigned long arg)
{
        return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif
*file
, struct vm_area_struct
*vma
)
1587 struct client
*client
= file
->private_data
;
1588 enum dma_data_direction direction
;
1590 int page_count
, ret
;
1592 if (fw_device_is_shutdown(client
->device
))
1595 /* FIXME: We could support multiple buffers, but we don't. */
1596 if (client
->buffer
.pages
!= NULL
)
1599 if (!(vma
->vm_flags
& VM_SHARED
))
1602 if (vma
->vm_start
& ~PAGE_MASK
)
1605 client
->vm_start
= vma
->vm_start
;
1606 size
= vma
->vm_end
- vma
->vm_start
;
1607 page_count
= size
>> PAGE_SHIFT
;
1608 if (size
& ~PAGE_MASK
)
1611 if (vma
->vm_flags
& VM_WRITE
)
1612 direction
= DMA_TO_DEVICE
;
1614 direction
= DMA_FROM_DEVICE
;
1616 ret
= fw_iso_buffer_init(&client
->buffer
, client
->device
->card
,
1617 page_count
, direction
);
1621 ret
= fw_iso_buffer_map(&client
->buffer
, vma
);
1623 fw_iso_buffer_destroy(&client
->buffer
, client
->device
->card
);
static int shutdown_resource(int id, void *p, void *data)
{
        struct client_resource *resource = p;
        struct client *client = data;

        resource->release(client, resource);
        client_put(client);

        return 0;
}
static int fw_device_op_release(struct inode *inode, struct file *file)
{
        struct client *client = file->private_data;
        struct event *event, *next_event;

        spin_lock_irq(&client->device->card->lock);
        list_del(&client->phy_receiver_link);
        spin_unlock_irq(&client->device->card->lock);

        mutex_lock(&client->device->client_list_mutex);
        list_del(&client->link);
        mutex_unlock(&client->device->client_list_mutex);

        if (client->iso_context)
                fw_iso_context_destroy(client->iso_context);

        if (client->buffer.pages)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        /* Freeze client->resource_idr and client->event_list */
        spin_lock_irq(&client->lock);
        client->in_shutdown = true;
        spin_unlock_irq(&client->lock);

        idr_for_each(&client->resource_idr, shutdown_resource, client);
        idr_remove_all(&client->resource_idr);
        idr_destroy(&client->resource_idr);

        list_for_each_entry_safe(event, next_event, &client->event_list, link)
                kfree(event);

        client_put(client);

        return 0;
}
static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
        struct client *client = file->private_data;
        unsigned int mask = 0;

        poll_wait(file, &client->wait, pt);

        if (fw_device_is_shutdown(client->device))
                mask |= POLLHUP | POLLERR;
        if (!list_empty(&client->event_list))
                mask |= POLLIN | POLLRDNORM;

        return mask;
}
const struct file_operations fw_device_ops = {
        .owner		= THIS_MODULE,
        .llseek		= no_llseek,
        .open		= fw_device_op_open,
        .read		= fw_device_op_read,
        .unlocked_ioctl	= fw_device_op_ioctl,
        .mmap		= fw_device_op_mmap,
        .release	= fw_device_op_release,
        .poll		= fw_device_op_poll,
#ifdef CONFIG_COMPAT
        .compat_ioctl	= fw_device_op_compat_ioctl,
#endif
};