/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include <asm/system.h>

#include "core.h"
/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */

#define FW_CDEV_KERNEL_VERSION			4
#define FW_CDEV_VERSION_EVENT_REQUEST2		4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END	4
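
/*
 * Note: client->version is whatever user space reported through
 * FW_CDEV_IOC_GET_INFO.  The per-feature constants above are the ABI
 * versions at which request2 events and explicit region_end allocations
 * became available; they are compared against client->version below.
 */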
struct client {
        u32 version;
        struct fw_device *device;

        spinlock_t lock;
        bool in_shutdown;
        struct idr resource_idr;
        struct list_head event_list;
        wait_queue_head_t wait;
        u64 bus_reset_closure;

        struct fw_iso_context *iso_context;
        u64 iso_closure;
        struct fw_iso_buffer buffer;
        unsigned long vm_start;

        struct list_head phy_receiver_link;
        u64 phy_receiver_closure;

        struct list_head link;
        struct kref kref;
};
static inline void client_get(struct client *client)
{
        kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
        struct client *client = container_of(kref, struct client, kref);

        fw_device_put(client->device);
        kfree(client);
}

static void client_put(struct client *client)
{
        kref_put(&client->kref, client_release);
}
struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
                                             struct client_resource *);
struct client_resource {
        client_resource_release_fn_t release;
        int handle;
};
struct address_handler_resource {
        struct client_resource resource;
        struct fw_address_handler handler;
        __u64 closure;
        struct client *client;
};
struct outbound_transaction_resource {
        struct client_resource resource;
        struct fw_transaction transaction;
};
struct inbound_transaction_resource {
        struct client_resource resource;
        struct fw_card *card;
        struct fw_request *request;
        void *data;
        size_t length;
};
struct descriptor_resource {
        struct client_resource resource;
        struct fw_descriptor descriptor;
        u32 data[0];
};
struct iso_resource {
        struct client_resource resource;
        struct client *client;
        /* Schedule work and access todo only with client->lock held. */
        struct delayed_work work;
        enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
              ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
        int generation;
        u64 channels;
        s32 bandwidth;
        __be32 transaction_data[2];
        struct iso_resource_event *e_alloc, *e_dealloc;
};
static void release_iso_resource(struct client *, struct client_resource *);
static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
        client_get(r->client);
        if (!schedule_delayed_work(&r->work, delay))
                client_put(r->client);
}
static void schedule_if_iso_resource(struct client_resource *resource)
{
        if (resource->release == release_iso_resource)
                schedule_iso_resource(container_of(resource,
                                        struct iso_resource, resource), 0);
}
/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
        struct { void *data; size_t size; } v[2];
        struct list_head link;
};
struct bus_reset_event {
        struct event event;
        struct fw_cdev_event_bus_reset reset;
};
struct outbound_transaction_event {
        struct event event;
        struct client *client;
        struct outbound_transaction_resource r;
        struct fw_cdev_event_response response;
};
struct inbound_transaction_event {
        struct event event;
        union {
                struct fw_cdev_event_request request;
                struct fw_cdev_event_request2 request2;
        } req;
};
struct iso_interrupt_event {
        struct event event;
        struct fw_cdev_event_iso_interrupt interrupt;
};
struct iso_interrupt_mc_event {
        struct event event;
        struct fw_cdev_event_iso_interrupt_mc interrupt;
};
struct iso_resource_event {
        struct event event;
        struct fw_cdev_event_iso_resource iso_resource;
};
struct outbound_phy_packet_event {
        struct event event;
        struct client *client;
        struct fw_packet p;
        struct fw_cdev_event_phy_packet phy_packet;
};
struct inbound_phy_packet_event {
        struct event event;
        struct fw_cdev_event_phy_packet phy_packet;
};
static inline void __user *u64_to_uptr(__u64 value)
{
        return (void __user *)(unsigned long)value;
}

static inline __u64 uptr_to_u64(void __user *ptr)
{
        return (__u64)(unsigned long)ptr;
}
static int fw_device_op_open(struct inode *inode, struct file *file)
{
        struct fw_device *device;
        struct client *client;

        device = fw_device_get_by_devt(inode->i_rdev);
        if (device == NULL)
                return -ENODEV;

        if (fw_device_is_shutdown(device)) {
                fw_device_put(device);
                return -ENODEV;
        }

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (client == NULL) {
                fw_device_put(device);
                return -ENOMEM;
        }

        client->device = device;
        spin_lock_init(&client->lock);
        idr_init(&client->resource_idr);
        INIT_LIST_HEAD(&client->event_list);
        init_waitqueue_head(&client->wait);
        INIT_LIST_HEAD(&client->phy_receiver_link);
        kref_init(&client->kref);

        file->private_data = client;

        mutex_lock(&device->client_list_mutex);
        list_add_tail(&client->link, &device->client_list);
        mutex_unlock(&device->client_list_mutex);

        return nonseekable_open(inode, file);
}
static void queue_event(struct client *client, struct event *event,
                        void *data0, size_t size0, void *data1, size_t size1)
{
        unsigned long flags;

        event->v[0].data = data0;
        event->v[0].size = size0;
        event->v[1].data = data1;
        event->v[1].size = size1;

        spin_lock_irqsave(&client->lock, flags);
        if (client->in_shutdown)
                kfree(event);
        else
                list_add_tail(&event->link, &client->event_list);
        spin_unlock_irqrestore(&client->lock, flags);

        wake_up_interruptible(&client->wait);
}
static int dequeue_event(struct client *client,
                         char __user *buffer, size_t count)
{
        struct event *event;
        size_t size, total;
        int i, ret;

        ret = wait_event_interruptible(client->wait,
                        !list_empty(&client->event_list) ||
                        fw_device_is_shutdown(client->device));
        if (ret < 0)
                return ret;

        if (list_empty(&client->event_list) &&
                       fw_device_is_shutdown(client->device))
                return -ENODEV;

        spin_lock_irq(&client->lock);
        event = list_first_entry(&client->event_list, struct event, link);
        list_del(&event->link);
        spin_unlock_irq(&client->lock);

        total = 0;
        for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
                size = min(event->v[i].size, count - total);
                if (copy_to_user(buffer + total, event->v[i].data, size)) {
                        ret = -EFAULT;
                        goto out;
                }
                total += size;
        }
        ret = total;

 out:
        kfree(event);

        return ret;
}
static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
                                 size_t count, loff_t *offset)
{
        struct client *client = file->private_data;

        return dequeue_event(client, buffer, count);
}
static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
                                 struct client *client)
{
        struct fw_card *card = client->device->card;

        spin_lock_irq(&card->lock);

        event->closure       = client->bus_reset_closure;
        event->type          = FW_CDEV_EVENT_BUS_RESET;
        event->generation    = client->device->generation;
        event->node_id       = client->device->node_id;
        event->local_node_id = card->local_node->node_id;
        event->bm_node_id    = card->bm_node_id;
        event->irm_node_id   = card->irm_node->node_id;
        event->root_node_id  = card->root_node->node_id;

        spin_unlock_irq(&card->lock);
}
static void for_each_client(struct fw_device *device,
                            void (*callback)(struct client *client))
{
        struct client *c;

        mutex_lock(&device->client_list_mutex);
        list_for_each_entry(c, &device->client_list, link)
                callback(c);
        mutex_unlock(&device->client_list_mutex);
}
static int schedule_reallocations(int id, void *p, void *data)
{
        schedule_if_iso_resource(p);

        return 0;
}
static void queue_bus_reset_event(struct client *client)
{
        struct bus_reset_event *e;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e == NULL) {
                fw_notify("Out of memory when allocating event\n");
                return;
        }

        fill_bus_reset_event(&e->reset, client);

        queue_event(client, &e->event,
                    &e->reset, sizeof(e->reset), NULL, 0);

        spin_lock_irq(&client->lock);
        idr_for_each(&client->resource_idr, schedule_reallocations, client);
        spin_unlock_irq(&client->lock);
}
void fw_device_cdev_update(struct fw_device *device)
{
        for_each_client(device, queue_bus_reset_event);
}
static void wake_up_client(struct client *client)
{
        wake_up_interruptible(&client->wait);
}
void fw_device_cdev_remove(struct fw_device *device)
{
        for_each_client(device, wake_up_client);
}
union ioctl_arg {
        struct fw_cdev_get_info                 get_info;
        struct fw_cdev_send_request             send_request;
        struct fw_cdev_allocate                 allocate;
        struct fw_cdev_deallocate               deallocate;
        struct fw_cdev_send_response            send_response;
        struct fw_cdev_initiate_bus_reset       initiate_bus_reset;
        struct fw_cdev_add_descriptor           add_descriptor;
        struct fw_cdev_remove_descriptor        remove_descriptor;
        struct fw_cdev_create_iso_context       create_iso_context;
        struct fw_cdev_queue_iso                queue_iso;
        struct fw_cdev_start_iso                start_iso;
        struct fw_cdev_stop_iso                 stop_iso;
        struct fw_cdev_get_cycle_timer          get_cycle_timer;
        struct fw_cdev_allocate_iso_resource    allocate_iso_resource;
        struct fw_cdev_send_stream_packet       send_stream_packet;
        struct fw_cdev_get_cycle_timer2         get_cycle_timer2;
        struct fw_cdev_send_phy_packet          send_phy_packet;
        struct fw_cdev_receive_phy_packets      receive_phy_packets;
        struct fw_cdev_set_iso_channels         set_iso_channels;
};
static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_info *a = &arg->get_info;
        struct fw_cdev_event_bus_reset bus_reset;
        unsigned long ret = 0;

        client->version = a->version;
        a->version = FW_CDEV_KERNEL_VERSION;
        a->card = client->device->card->index;

        down_read(&fw_device_rwsem);

        if (a->rom != 0) {
                size_t want = a->rom_length;
                size_t have = client->device->config_rom_length * 4;

                ret = copy_to_user(u64_to_uptr(a->rom),
                                   client->device->config_rom, min(want, have));
        }
        a->rom_length = client->device->config_rom_length * 4;

        up_read(&fw_device_rwsem);

        if (ret != 0)
                return -EFAULT;

        client->bus_reset_closure = a->bus_reset_closure;
        if (a->bus_reset != 0) {
                fill_bus_reset_event(&bus_reset, client);
                if (copy_to_user(u64_to_uptr(a->bus_reset),
                                 &bus_reset, sizeof(bus_reset)))
                        return -EFAULT;
        }

        return 0;
}
static int add_client_resource(struct client *client,
                               struct client_resource *resource, gfp_t gfp_mask)
{
        unsigned long flags;
        int ret;

 retry:
        if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
                return -ENOMEM;

        spin_lock_irqsave(&client->lock, flags);
        if (client->in_shutdown)
                ret = -ECANCELED;
        else
                ret = idr_get_new(&client->resource_idr, resource,
                                  &resource->handle);
        if (ret >= 0) {
                client_get(client);
                schedule_if_iso_resource(resource);
        }
        spin_unlock_irqrestore(&client->lock, flags);

        if (ret == -EAGAIN)
                goto retry;

        return ret < 0 ? ret : 0;
}
static int release_client_resource(struct client *client, u32 handle,
                                   client_resource_release_fn_t release,
                                   struct client_resource **return_resource)
{
        struct client_resource *resource;

        spin_lock_irq(&client->lock);
        if (client->in_shutdown)
                resource = NULL;
        else
                resource = idr_find(&client->resource_idr, handle);
        if (resource && resource->release == release)
                idr_remove(&client->resource_idr, handle);
        spin_unlock_irq(&client->lock);

        if (!(resource && resource->release == release))
                return -EINVAL;

        if (return_resource)
                *return_resource = resource;
        else
                resource->release(client, resource);

        client_put(client);

        return 0;
}
static void release_transaction(struct client *client,
                                struct client_resource *resource)
{
        struct outbound_transaction_resource *r = container_of(resource,
                        struct outbound_transaction_resource, resource);

        fw_cancel_transaction(client->device->card, &r->transaction);
}
static void complete_transaction(struct fw_card *card, int rcode,
                                 void *payload, size_t length, void *data)
{
        struct outbound_transaction_event *e = data;
        struct fw_cdev_event_response *rsp = &e->response;
        struct client *client = e->client;
        unsigned long flags;

        if (length < rsp->length)
                rsp->length = length;
        if (rcode == RCODE_COMPLETE)
                memcpy(rsp->data, payload, rsp->length);

        spin_lock_irqsave(&client->lock, flags);
        /*
         * 1. If called while in shutdown, the idr tree must be left untouched.
         *    The idr handle will be removed and the client reference will be
         *    dropped later.
         * 2. If the call chain was release_client_resource ->
         *    release_transaction -> complete_transaction (instead of a normal
         *    conclusion of the transaction), i.e. if this resource was already
         *    unregistered from the idr, the client reference will be dropped
         *    by release_client_resource and we must not drop it here.
         */
        if (!client->in_shutdown &&
            idr_find(&client->resource_idr, e->r.resource.handle)) {
                idr_remove(&client->resource_idr, e->r.resource.handle);
                /* Drop the idr's reference */
                client_put(client);
        }
        spin_unlock_irqrestore(&client->lock, flags);

        rsp->type = FW_CDEV_EVENT_RESPONSE;
        rsp->rcode = rcode;

        /*
         * In the case that sizeof(*rsp) doesn't align with the position of the
         * data, and the read is short, preserve an extra copy of the data
         * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
         * for short reads and some apps depended on it, this is both safe
         * and prudent for compatibility.
         */
        if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
                queue_event(client, &e->event, rsp, sizeof(*rsp),
                            rsp->data, rsp->length);
        else
                queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
                            NULL, 0);

        /* Drop the transaction callback's reference */
        client_put(client);
}
static int init_request(struct client *client,
                        struct fw_cdev_send_request *request,
                        int destination_id, int speed)
{
        struct outbound_transaction_event *e;
        int ret;

        if (request->tcode != TCODE_STREAM_DATA &&
            (request->length > 4096 || request->length > 512 << speed))
                return -EIO;

        if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
            request->length < 4)
                return -EINVAL;

        e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        e->client = client;
        e->response.length = request->length;
        e->response.closure = request->closure;

        if (request->data &&
            copy_from_user(e->response.data,
                           u64_to_uptr(request->data), request->length)) {
                ret = -EFAULT;
                goto failed;
        }

        e->r.resource.release = release_transaction;
        ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
        if (ret < 0)
                goto failed;

        /* Get a reference for the transaction callback */
        client_get(client);

        fw_send_request(client->device->card, &e->r.transaction,
                        request->tcode, destination_id, request->generation,
                        speed, request->offset, e->response.data,
                        request->length, complete_transaction, e);
        return 0;

 failed:
        kfree(e);

        return ret;
}
static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
        switch (arg->send_request.tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_WRITE_BLOCK_REQUEST:
        case TCODE_READ_QUADLET_REQUEST:
        case TCODE_READ_BLOCK_REQUEST:
        case TCODE_LOCK_MASK_SWAP:
        case TCODE_LOCK_COMPARE_SWAP:
        case TCODE_LOCK_FETCH_ADD:
        case TCODE_LOCK_LITTLE_ADD:
        case TCODE_LOCK_BOUNDED_ADD:
        case TCODE_LOCK_WRAP_ADD:
        case TCODE_LOCK_VENDOR_DEPENDENT:
                break;
        default:
                return -EINVAL;
        }

        return init_request(client, &arg->send_request, client->device->node_id,
                            client->device->max_speed);
}
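
/*
 * FCP requests are handed to the address handler with a NULL fw_request:
 * the transaction layer has already sent the response for them, so there
 * is nothing left here to respond to.
 */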
static inline bool is_fcp_request(struct fw_request *request)
{
        return request == NULL;
}
static void release_request(struct client *client,
                            struct client_resource *resource)
{
        struct inbound_transaction_resource *r = container_of(resource,
                        struct inbound_transaction_resource, resource);

        if (is_fcp_request(r->request))
                kfree(r->data);
        else
                fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

        fw_card_put(r->card);
        kfree(r);
}
static void handle_request(struct fw_card *card, struct fw_request *request,
                           int tcode, int destination, int source,
                           int generation, unsigned long long offset,
                           void *payload, size_t length, void *callback_data)
{
        struct address_handler_resource *handler = callback_data;
        struct inbound_transaction_resource *r;
        struct inbound_transaction_event *e;
        size_t event_size0;
        void *fcp_frame = NULL;
        int ret;

        /* card may be different from handler->client->device->card */
        fw_card_get(card);

        r = kmalloc(sizeof(*r), GFP_ATOMIC);
        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (r == NULL || e == NULL) {
                fw_notify("Out of memory when allocating event\n");
                goto failed;
        }
        r->card    = card;
        r->request = request;
        r->data    = payload;
        r->length  = length;

        if (is_fcp_request(request)) {
                /*
                 * FIXME: Let core-transaction.c manage a
                 * single reference-counted copy?
                 */
                fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
                if (fcp_frame == NULL)
                        goto failed;
                r->data = fcp_frame;
        }

        r->resource.release = release_request;
        ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
        if (ret < 0)
                goto failed;

        if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
                struct fw_cdev_event_request *req = &e->req.request;

                if (tcode & 0x10)
                        tcode = TCODE_LOCK_REQUEST;

                req->type       = FW_CDEV_EVENT_REQUEST;
                req->tcode      = tcode;
                req->offset     = offset;
                req->length     = length;
                req->handle     = r->resource.handle;
                req->closure    = handler->closure;
                event_size0     = sizeof(*req);
        } else {
                struct fw_cdev_event_request2 *req = &e->req.request2;

                req->type       = FW_CDEV_EVENT_REQUEST2;
                req->tcode      = tcode;
                req->offset     = offset;
                req->source_node_id = source;
                req->destination_node_id = destination;
                req->card       = card->index;
                req->generation = generation;
                req->length     = length;
                req->handle     = r->resource.handle;
                req->closure    = handler->closure;
                event_size0     = sizeof(*req);
        }

        queue_event(handler->client, &e->event,
                    &e->req, event_size0, r->data, length);
        return;

 failed:
        kfree(r);
        kfree(e);
        kfree(fcp_frame);

        if (!is_fcp_request(request))
                fw_send_response(card, request, RCODE_CONFLICT_ERROR);

        fw_card_put(card);
}
static void release_address_handler(struct client *client,
                                    struct client_resource *resource)
{
        struct address_handler_resource *r =
                container_of(resource, struct address_handler_resource, resource);

        fw_core_remove_address_handler(&r->handler);
        kfree(r);
}
static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_allocate *a = &arg->allocate;
        struct address_handler_resource *r;
        struct fw_address_region region;
        int ret;

        r = kmalloc(sizeof(*r), GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        region.start = a->offset;
        if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
                region.end = a->offset + a->length;
        else
                region.end = a->region_end;

        r->handler.length           = a->length;
        r->handler.address_callback = handle_request;
        r->handler.callback_data    = r;
        r->closure   = a->closure;
        r->client    = client;

        ret = fw_core_add_address_handler(&r->handler, &region);
        if (ret < 0) {
                kfree(r);
                return ret;
        }
        a->offset = r->handler.offset;

        r->resource.release = release_address_handler;
        ret = add_client_resource(client, &r->resource, GFP_KERNEL);
        if (ret < 0) {
                release_address_handler(client, &r->resource);
                return ret;
        }
        a->handle = r->resource.handle;

        return 0;
}
static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
        return release_client_resource(client, arg->deallocate.handle,
                                       release_address_handler, NULL);
}
static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_response *a = &arg->send_response;
        struct client_resource *resource;
        struct inbound_transaction_resource *r;
        int ret = 0;

        if (release_client_resource(client, a->handle,
                                    release_request, &resource) < 0)
                return -EINVAL;

        r = container_of(resource, struct inbound_transaction_resource,
                         resource);
        if (is_fcp_request(r->request))
                goto out;

        if (a->length != fw_get_response_length(r->request)) {
                ret = -EINVAL;
                kfree(r->request);
                goto out;
        }
        if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
                ret = -EFAULT;
                kfree(r->request);
                goto out;
        }
        fw_send_response(r->card, r->request, a->rcode);
 out:
        fw_card_put(r->card);
        kfree(r);

        return ret;
}
static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
        fw_schedule_bus_reset(client->device->card, true,
                        arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);

        return 0;
}
static void release_descriptor(struct client *client,
                               struct client_resource *resource)
{
        struct descriptor_resource *r =
                container_of(resource, struct descriptor_resource, resource);

        fw_core_remove_descriptor(&r->descriptor);
        kfree(r);
}
static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
        struct descriptor_resource *r;
        int ret;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        if (a->length > 256)
                return -EINVAL;

        r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
                ret = -EFAULT;
                goto failed;
        }

        r->descriptor.length    = a->length;
        r->descriptor.immediate = a->immediate;
        r->descriptor.key       = a->key;
        r->descriptor.data      = r->data;

        ret = fw_core_add_descriptor(&r->descriptor);
        if (ret < 0)
                goto failed;

        r->resource.release = release_descriptor;
        ret = add_client_resource(client, &r->resource, GFP_KERNEL);
        if (ret < 0) {
                fw_core_remove_descriptor(&r->descriptor);
                goto failed;
        }
        a->handle = r->resource.handle;

        return 0;
 failed:
        kfree(r);

        return ret;
}
static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
        return release_client_resource(client, arg->remove_descriptor.handle,
                                       release_descriptor, NULL);
}
static void iso_callback(struct fw_iso_context *context, u32 cycle,
                         size_t header_length, void *header, void *data)
{
        struct client *client = data;
        struct iso_interrupt_event *e;

        e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
        if (e == NULL) {
                fw_notify("Out of memory when allocating event\n");
                return;
        }
        e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
        e->interrupt.closure   = client->iso_closure;
        e->interrupt.cycle     = cycle;
        e->interrupt.header_length = header_length;
        memcpy(e->interrupt.header, header, header_length);
        queue_event(client, &e->event, &e->interrupt,
                    sizeof(e->interrupt) + header_length, NULL, 0);
}
static void iso_mc_callback(struct fw_iso_context *context,
                            dma_addr_t completed, void *data)
{
        struct client *client = data;
        struct iso_interrupt_mc_event *e;

        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (e == NULL) {
                fw_notify("Out of memory when allocating event\n");
                return;
        }
        e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
        e->interrupt.closure   = client->iso_closure;
        e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
                                                      completed);
        queue_event(client, &e->event, &e->interrupt,
                    sizeof(e->interrupt), NULL, 0);
}
static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
        struct fw_iso_context *context;
        fw_iso_callback_t cb;

        BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
                     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE  ||
                     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
                                        FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);

        switch (a->type) {
        case FW_ISO_CONTEXT_TRANSMIT:
                if (a->speed > SCODE_3200 || a->channel > 63)
                        return -EINVAL;

                cb = iso_callback;
                break;

        case FW_ISO_CONTEXT_RECEIVE:
                if (a->header_size < 4 || (a->header_size & 3) ||
                    a->channel > 63)
                        return -EINVAL;

                cb = iso_callback;
                break;

        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                cb = (fw_iso_callback_t)iso_mc_callback;
                break;

        default:
                return -EINVAL;
        }

        context = fw_iso_context_create(client->device->card, a->type,
                        a->channel, a->speed, a->header_size, cb, client);
        if (IS_ERR(context))
                return PTR_ERR(context);

        /* We only support one context at this time. */
        spin_lock_irq(&client->lock);
        if (client->iso_context != NULL) {
                spin_unlock_irq(&client->lock);
                fw_iso_context_destroy(context);
                return -EBUSY;
        }
        client->iso_closure = a->closure;
        client->iso_context = context;
        spin_unlock_irq(&client->lock);

        a->handle = 0;

        return 0;
}
static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
        struct fw_iso_context *ctx = client->iso_context;

        if (ctx == NULL || a->handle != 0)
                return -EINVAL;

        return fw_iso_context_set_channels(ctx, &a->channels);
}
/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
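
/*
 * These decode the control word that user space builds with the
 * FW_CDEV_ISO_* helpers from <linux/firewire-cdev.h>.  Purely illustrative,
 * a queued transmit packet is typically composed along the lines of:
 *
 *	p->control = FW_CDEV_ISO_PAYLOAD_LENGTH(payload_len)
 *		   | FW_CDEV_ISO_HEADER_LENGTH(header_len)
 *		   | FW_CDEV_ISO_INTERRUPT;
 */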
static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_queue_iso *a = &arg->queue_iso;
        struct fw_cdev_iso_packet __user *p, *end, *next;
        struct fw_iso_context *ctx = client->iso_context;
        unsigned long payload, buffer_end, transmit_header_bytes = 0;
        u32 control;
        int count;
        struct {
                struct fw_iso_packet packet;
                u8 header[256];
        } u;

        if (ctx == NULL || a->handle != 0)
                return -EINVAL;

        /*
         * If the user passes a non-NULL data pointer, has mmap()'ed
         * the iso buffer, and the pointer points inside the buffer,
         * we setup the payload pointers accordingly.  Otherwise we
         * set them both to 0, which will still let packets with
         * payload_length == 0 through.  In other words, if no packets
         * use the indirect payload, the iso buffer need not be mapped
         * and the a->data pointer is ignored.
         */
        payload = (unsigned long)a->data - client->vm_start;
        buffer_end = client->buffer.page_count << PAGE_SHIFT;
        if (a->data == 0 || client->buffer.pages == NULL ||
            payload >= buffer_end) {
                payload = 0;
                buffer_end = 0;
        }

        if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
                return -EINVAL;

        p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
        if (!access_ok(VERIFY_READ, p, a->size))
                return -EFAULT;

        end = (void __user *)p + a->size;
        count = 0;
        while (p < end) {
                if (get_user(control, &p->control))
                        return -EFAULT;
                u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
                u.packet.interrupt = GET_INTERRUPT(control);
                u.packet.skip = GET_SKIP(control);
                u.packet.tag = GET_TAG(control);
                u.packet.sy = GET_SY(control);
                u.packet.header_length = GET_HEADER_LENGTH(control);

                switch (ctx->type) {
                case FW_ISO_CONTEXT_TRANSMIT:
                        if (u.packet.header_length & 3)
                                return -EINVAL;
                        transmit_header_bytes = u.packet.header_length;
                        break;

                case FW_ISO_CONTEXT_RECEIVE:
                        if (u.packet.header_length == 0 ||
                            u.packet.header_length % ctx->header_size != 0)
                                return -EINVAL;
                        transmit_header_bytes = 0;
                        break;

                case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                        if (u.packet.payload_length == 0 ||
                            u.packet.payload_length & 3)
                                return -EINVAL;
                        break;
                }

                next = (struct fw_cdev_iso_packet __user *)
                        &p->header[transmit_header_bytes / 4];
                if (next > end)
                        return -EINVAL;
                if (__copy_from_user
                    (u.packet.header, p->header, transmit_header_bytes))
                        return -EFAULT;
                if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
                    u.packet.header_length + u.packet.payload_length > 0)
                        return -EINVAL;
                if (payload + u.packet.payload_length > buffer_end)
                        return -EINVAL;

                if (fw_iso_context_queue(ctx, &u.packet,
                                         &client->buffer, payload))
                        break;

                p = next;
                payload += u.packet.payload_length;
                count++;
        }

        a->size    -= uptr_to_u64(p) - a->packets;
        a->packets  = uptr_to_u64(p);
        a->data     = client->vm_start + payload;

        return count;
}
static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_start_iso *a = &arg->start_iso;

        BUILD_BUG_ON(
            FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
            FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);

        if (client->iso_context == NULL || a->handle != 0)
                return -EINVAL;

        if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
            (a->tags == 0 || a->tags > 15 || a->sync > 15))
                return -EINVAL;

        return fw_iso_context_start(client->iso_context,
                                    a->cycle, a->sync, a->tags);
}
static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_stop_iso *a = &arg->stop_iso;

        if (client->iso_context == NULL || a->handle != 0)
                return -EINVAL;

        return fw_iso_context_stop(client->iso_context);
}
static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
        struct fw_card *card = client->device->card;
        struct timespec ts = {0, 0};
        u32 cycle_time;
        int ret = 0;

        local_irq_disable();

        cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);

        switch (a->clk_id) {
        case CLOCK_REALTIME:      getnstimeofday(&ts);                   break;
        case CLOCK_MONOTONIC:     do_posix_clock_monotonic_gettime(&ts); break;
        case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts);                  break;
        default:
                ret = -EINVAL;
        }

        local_irq_enable();

        a->tv_sec      = ts.tv_sec;
        a->tv_nsec     = ts.tv_nsec;
        a->cycle_timer = cycle_time;

        return ret;
}
static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
        struct fw_cdev_get_cycle_timer2 ct2;

        ct2.clk_id = CLOCK_REALTIME;
        ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

        a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
        a->cycle_timer = ct2.cycle_timer;

        return 0;
}
static void iso_resource_work(struct work_struct *work)
{
        struct iso_resource_event *e;
        struct iso_resource *r =
                        container_of(work, struct iso_resource, work.work);
        struct client *client = r->client;
        int generation, channel, bandwidth, todo;
        bool skip, free, success;

        spin_lock_irq(&client->lock);
        generation = client->device->generation;
        todo = r->todo;
        /* Allow 1000ms grace period for other reallocations. */
        if (todo == ISO_RES_ALLOC &&
            time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
                schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
                skip = true;
        } else {
                /* We could be called twice within the same generation. */
                skip = todo == ISO_RES_REALLOC &&
                       r->generation == generation;
        }
        free = todo == ISO_RES_DEALLOC ||
               todo == ISO_RES_ALLOC_ONCE ||
               todo == ISO_RES_DEALLOC_ONCE;
        r->generation = generation;
        spin_unlock_irq(&client->lock);

        if (skip)
                goto out;

        bandwidth = r->bandwidth;

        fw_iso_resource_manage(client->device->card, generation,
                        r->channels, &channel, &bandwidth,
                        todo == ISO_RES_ALLOC ||
                        todo == ISO_RES_REALLOC ||
                        todo == ISO_RES_ALLOC_ONCE,
                        r->transaction_data);
        /*
         * Is this generation outdated already?  As long as this resource sticks
         * in the idr, it will be scheduled again for a newer generation or at
         * shutdown.
         */
        if (channel == -EAGAIN &&
            (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
                goto out;

        success = channel >= 0 || bandwidth > 0;

        spin_lock_irq(&client->lock);
        /*
         * Transit from allocation to reallocation, except if the client
         * requested deallocation in the meantime.
         */
        if (r->todo == ISO_RES_ALLOC)
                r->todo = ISO_RES_REALLOC;
        /*
         * Allocation or reallocation failure?  Pull this resource out of the
         * idr and prepare for deletion, unless the client is shutting down.
         */
        if (r->todo == ISO_RES_REALLOC && !success &&
            !client->in_shutdown &&
            idr_find(&client->resource_idr, r->resource.handle)) {
                idr_remove(&client->resource_idr, r->resource.handle);
                client_put(client);
                free = true;
        }
        spin_unlock_irq(&client->lock);

        if (todo == ISO_RES_ALLOC && channel >= 0)
                r->channels = 1ULL << channel;

        if (todo == ISO_RES_REALLOC && success)
                goto out;

        if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
                e = r->e_alloc;
                r->e_alloc = NULL;
        } else {
                e = r->e_dealloc;
                r->e_dealloc = NULL;
        }
        e->iso_resource.handle    = r->resource.handle;
        e->iso_resource.channel   = channel;
        e->iso_resource.bandwidth = bandwidth;

        queue_event(client, &e->event,
                    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

        if (free) {
                cancel_delayed_work(&r->work);
                kfree(r->e_alloc);
                kfree(r->e_dealloc);
                kfree(r);
        }
 out:
        client_put(client);
}
static void release_iso_resource(struct client *client,
                                 struct client_resource *resource)
{
        struct iso_resource *r =
                container_of(resource, struct iso_resource, resource);

        spin_lock_irq(&client->lock);
        r->todo = ISO_RES_DEALLOC;
        schedule_iso_resource(r, 0);
        spin_unlock_irq(&client->lock);
}
static int init_iso_resource(struct client *client,
                struct fw_cdev_allocate_iso_resource *request, int todo)
{
        struct iso_resource_event *e1, *e2;
        struct iso_resource *r;
        int ret;

        if ((request->channels == 0 && request->bandwidth == 0) ||
            request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
            request->bandwidth < 0)
                return -EINVAL;

        r  = kmalloc(sizeof(*r), GFP_KERNEL);
        e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
        e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
        if (r == NULL || e1 == NULL || e2 == NULL) {
                ret = -ENOMEM;
                goto fail;
        }

        INIT_DELAYED_WORK(&r->work, iso_resource_work);
        r->client    = client;
        r->todo      = todo;
        r->generation = -1;
        r->channels  = request->channels;
        r->bandwidth = request->bandwidth;
        r->e_alloc   = e1;
        r->e_dealloc = e2;

        e1->iso_resource.closure = request->closure;
        e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
        e2->iso_resource.closure = request->closure;
        e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

        if (todo == ISO_RES_ALLOC) {
                r->resource.release = release_iso_resource;
                ret = add_client_resource(client, &r->resource, GFP_KERNEL);
                if (ret < 0)
                        goto fail;
        } else {
                r->resource.release = NULL;
                r->resource.handle = -1;
                schedule_iso_resource(r, 0);
        }
        request->handle = r->resource.handle;

        return 0;
 fail:
        kfree(r);
        kfree(e1);
        kfree(e2);

        return ret;
}
static int ioctl_allocate_iso_resource(struct client *client,
                                       union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_ALLOC);
}
static int ioctl_deallocate_iso_resource(struct client *client,
                                         union ioctl_arg *arg)
{
        return release_client_resource(client,
                        arg->deallocate.handle, release_iso_resource, NULL);
}
static int ioctl_allocate_iso_resource_once(struct client *client,
                                            union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}
static int ioctl_deallocate_iso_resource_once(struct client *client,
                                              union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}
/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
        return client->device->max_speed;
}
static int ioctl_send_broadcast_request(struct client *client,
                                        union ioctl_arg *arg)
{
        struct fw_cdev_send_request *a = &arg->send_request;

        switch (a->tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_WRITE_BLOCK_REQUEST:
                break;
        default:
                return -EINVAL;
        }

        /* Security policy: Only allow accesses to Units Space. */
        if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
                return -EACCES;

        return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}
static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
        struct fw_cdev_send_request request;
        int dest;

        if (a->speed > client->device->card->link_speed ||
            a->length > 1024 << a->speed)
                return -EIO;

        if (a->tag > 3 || a->channel > 63 || a->sy > 15)
                return -EINVAL;

        dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
        request.tcode           = TCODE_STREAM_DATA;
        request.length          = a->length;
        request.closure         = a->closure;
        request.data            = a->data;
        request.generation      = a->generation;

        return init_request(client, &request, dest, a->speed);
}
static void outbound_phy_packet_callback(struct fw_packet *packet,
                                         struct fw_card *card, int status)
{
        struct outbound_phy_packet_event *e =
                container_of(packet, struct outbound_phy_packet_event, p);

        switch (status) {
        /* expected: */
        case ACK_COMPLETE:      e->phy_packet.rcode = RCODE_COMPLETE;   break;
        /* should never happen with PHY packets: */
        case ACK_PENDING:       e->phy_packet.rcode = RCODE_COMPLETE;   break;
        case ACK_BUSY_X:
        case ACK_BUSY_A:
        case ACK_BUSY_B:        e->phy_packet.rcode = RCODE_BUSY;       break;
        case ACK_DATA_ERROR:    e->phy_packet.rcode = RCODE_DATA_ERROR; break;
        case ACK_TYPE_ERROR:    e->phy_packet.rcode = RCODE_TYPE_ERROR; break;
        /* stale generation; cancelled; on certain controllers: no ack */
        default:                e->phy_packet.rcode = status;           break;
        }
        e->phy_packet.data[0] = packet->timestamp;

        queue_event(e->client, &e->event, &e->phy_packet,
                    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
        client_put(e->client);
}
static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
        struct fw_card *card = client->device->card;
        struct outbound_phy_packet_event *e;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        client_get(client);
        e->client               = client;
        e->p.speed              = SCODE_100;
        e->p.generation         = a->generation;
        e->p.header[0]          = TCODE_LINK_INTERNAL << 4;
        e->p.header[1]          = a->data[0];
        e->p.header[2]          = a->data[1];
        e->p.header_length      = 12;
        e->p.callback           = outbound_phy_packet_callback;
        e->phy_packet.closure   = a->closure;
        e->phy_packet.type      = FW_CDEV_EVENT_PHY_PACKET_SENT;
        if (is_ping_packet(a->data))
                        e->phy_packet.length = 4;

        card->driver->send_request(card, &e->p);

        return 0;
}
static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
        struct fw_card *card = client->device->card;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        spin_lock_irq(&card->lock);

        list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
        client->phy_receiver_closure = a->closure;

        spin_unlock_irq(&card->lock);

        return 0;
}
void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
        struct client *client;
        struct inbound_phy_packet_event *e;
        unsigned long flags;

        spin_lock_irqsave(&card->lock, flags);

        list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
                e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
                if (e == NULL) {
                        fw_notify("Out of memory when allocating event\n");
                        break;
                }
                e->phy_packet.closure   = client->phy_receiver_closure;
                e->phy_packet.type      = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
                e->phy_packet.rcode     = RCODE_COMPLETE;
                e->phy_packet.length    = 8;
                e->phy_packet.data[0]   = p->header[1];
                e->phy_packet.data[1]   = p->header[2];
                queue_event(client, &e->event,
                            &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
        }

        spin_unlock_irqrestore(&card->lock, flags);
}
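
/*
 * Dispatch table for the FW_CDEV_IOC_* ioctls.  The array index is the
 * _IOC_NR() of the command; for example FW_CDEV_IOC_GET_INFO is defined in
 * <linux/firewire-cdev.h> as _IOWR('#', 0x00, struct fw_cdev_get_info) and
 * therefore maps to slot 0x00 below.
 */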
static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
        [0x00] = ioctl_get_info,
        [0x01] = ioctl_send_request,
        [0x02] = ioctl_allocate,
        [0x03] = ioctl_deallocate,
        [0x04] = ioctl_send_response,
        [0x05] = ioctl_initiate_bus_reset,
        [0x06] = ioctl_add_descriptor,
        [0x07] = ioctl_remove_descriptor,
        [0x08] = ioctl_create_iso_context,
        [0x09] = ioctl_queue_iso,
        [0x0a] = ioctl_start_iso,
        [0x0b] = ioctl_stop_iso,
        [0x0c] = ioctl_get_cycle_timer,
        [0x0d] = ioctl_allocate_iso_resource,
        [0x0e] = ioctl_deallocate_iso_resource,
        [0x0f] = ioctl_allocate_iso_resource_once,
        [0x10] = ioctl_deallocate_iso_resource_once,
        [0x11] = ioctl_get_speed,
        [0x12] = ioctl_send_broadcast_request,
        [0x13] = ioctl_send_stream_packet,
        [0x14] = ioctl_get_cycle_timer2,
        [0x15] = ioctl_send_phy_packet,
        [0x16] = ioctl_receive_phy_packets,
        [0x17] = ioctl_set_iso_channels,
};
static int dispatch_ioctl(struct client *client,
                          unsigned int cmd, void __user *arg)
{
        union ioctl_arg buffer;
        int ret;

        if (fw_device_is_shutdown(client->device))
                return -ENODEV;

        if (_IOC_TYPE(cmd) != '#' ||
            _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
            _IOC_SIZE(cmd) > sizeof(buffer))
                return -ENOTTY;

        if (_IOC_DIR(cmd) == _IOC_READ)
                memset(&buffer, 0, _IOC_SIZE(cmd));

        if (_IOC_DIR(cmd) & _IOC_WRITE)
                if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
                        return -EFAULT;

        ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
        if (ret < 0)
                return ret;

        if (_IOC_DIR(cmd) & _IOC_READ)
                if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
                        return -EFAULT;

        return ret;
}
static long fw_device_op_ioctl(struct file *file,
                               unsigned int cmd, unsigned long arg)
{
        return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}
#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
                                      unsigned int cmd, unsigned long arg)
{
        return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif
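
/*
 * The mmap() handler below backs a single iso buffer per client: the mapping
 * must be shared and page-aligned, and the DMA direction is derived from
 * whether the mapping is writable (VM_WRITE means DMA_TO_DEVICE, i.e.
 * transmit contexts).
 */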
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct client *client = file->private_data;
        enum dma_data_direction direction;
        unsigned long size;
        int page_count, ret;

        if (fw_device_is_shutdown(client->device))
                return -ENODEV;

        /* FIXME: We could support multiple buffers, but we don't. */
        if (client->buffer.pages != NULL)
                return -EBUSY;

        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        if (vma->vm_start & ~PAGE_MASK)
                return -EINVAL;

        client->vm_start = vma->vm_start;
        size = vma->vm_end - vma->vm_start;
        page_count = size >> PAGE_SHIFT;
        if (size & ~PAGE_MASK)
                return -EINVAL;

        if (vma->vm_flags & VM_WRITE)
                direction = DMA_TO_DEVICE;
        else
                direction = DMA_FROM_DEVICE;

        ret = fw_iso_buffer_init(&client->buffer, client->device->card,
                                 page_count, direction);
        if (ret < 0)
                return ret;

        ret = fw_iso_buffer_map(&client->buffer, vma);
        if (ret < 0)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        return ret;
}
static int shutdown_resource(int id, void *p, void *data)
{
        struct client_resource *resource = p;
        struct client *client = data;

        resource->release(client, resource);
        client_put(client);

        return 0;
}
static int fw_device_op_release(struct inode *inode, struct file *file)
{
        struct client *client = file->private_data;
        struct event *event, *next_event;

        spin_lock_irq(&client->device->card->lock);
        list_del(&client->phy_receiver_link);
        spin_unlock_irq(&client->device->card->lock);

        mutex_lock(&client->device->client_list_mutex);
        list_del(&client->link);
        mutex_unlock(&client->device->client_list_mutex);

        if (client->iso_context)
                fw_iso_context_destroy(client->iso_context);

        if (client->buffer.pages)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        /* Freeze client->resource_idr and client->event_list */
        spin_lock_irq(&client->lock);
        client->in_shutdown = true;
        spin_unlock_irq(&client->lock);

        idr_for_each(&client->resource_idr, shutdown_resource, client);
        idr_remove_all(&client->resource_idr);
        idr_destroy(&client->resource_idr);

        list_for_each_entry_safe(event, next_event, &client->event_list, link)
                kfree(event);

        client_put(client);

        return 0;
}
static unsigned int fw_device_op_poll(struct file *file, poll_table *pt)
{
        struct client *client = file->private_data;
        unsigned int mask = 0;

        poll_wait(file, &client->wait, pt);

        if (fw_device_is_shutdown(client->device))
                mask |= POLLHUP | POLLERR;
        if (!list_empty(&client->event_list))
                mask |= POLLIN | POLLRDNORM;

        return mask;
}
const struct file_operations fw_device_ops = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .open           = fw_device_op_open,
        .read           = fw_device_op_read,
        .unlocked_ioctl = fw_device_op_ioctl,
        .mmap           = fw_device_op_mmap,
        .release        = fw_device_op_release,
        .poll           = fw_device_op_poll,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = fw_device_op_compat_ioctl,
#endif
};