/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "core.h"
/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */
#define FW_CDEV_KERNEL_VERSION			5
#define FW_CDEV_VERSION_EVENT_REQUEST2		4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END	4
struct client {
        u32 version;
        struct fw_device *device;

        spinlock_t lock;
        bool in_shutdown;
        struct idr resource_idr;
        struct list_head event_list;
        wait_queue_head_t wait;
        wait_queue_head_t tx_flush_wait;
        u64 bus_reset_closure;

        struct fw_iso_context *iso_context;
        u64 iso_closure;
        struct fw_iso_buffer buffer;
        unsigned long vm_start;
        bool buffer_is_mapped;

        struct list_head phy_receiver_link;
        u64 phy_receiver_closure;

        struct list_head link;
        struct kref kref;
};
static inline void client_get(struct client *client)
{
        kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
        struct client *client = container_of(kref, struct client, kref);

        fw_device_put(client->device);
        kfree(client);
}

static void client_put(struct client *client)
{
        kref_put(&client->kref, client_release);
}
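
/*
 * Reference-counting convention used throughout this file: every entry in
 * client->resource_idr pins the client (client_get() in
 * add_client_resource()), and the reference is dropped when the entry
 * leaves the idr again (release_client_resource(), complete_transaction(),
 * shutdown_resource()).  client_release() therefore only runs once all
 * resources are gone.
 */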
struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
                                             struct client_resource *);
struct client_resource {
        client_resource_release_fn_t release;
        int handle;
};

struct address_handler_resource {
        struct client_resource resource;
        struct fw_address_handler handler;
        __u64 closure;
        struct client *client;
};

struct outbound_transaction_resource {
        struct client_resource resource;
        struct fw_transaction transaction;
};

struct inbound_transaction_resource {
        struct client_resource resource;
        struct fw_card *card;
        struct fw_request *request;
        void *data;
        size_t length;
};

struct descriptor_resource {
        struct client_resource resource;
        struct fw_descriptor descriptor;
        u32 data[0];
};

struct iso_resource {
        struct client_resource resource;
        struct client *client;
        /* Schedule work and access todo only with client->lock held. */
        struct delayed_work work;
        enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
              ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
        int generation;
        u64 channels;
        s32 bandwidth;
        struct iso_resource_event *e_alloc, *e_dealloc;
};

static void release_iso_resource(struct client *, struct client_resource *);
static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
        client_get(r->client);
        if (!queue_delayed_work(fw_workqueue, &r->work, delay))
                client_put(r->client);
}

static void schedule_if_iso_resource(struct client_resource *resource)
{
        if (resource->release == release_iso_resource)
                schedule_iso_resource(container_of(resource,
                                        struct iso_resource, resource), 0);
}
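
/*
 * Rough sketch of the iso_resource state machine, reconstructed from
 * iso_resource_work() below:
 *
 *   ISO_RES_ALLOC         initial allocation; on success transitions to
 *   ISO_RES_REALLOC       reallocated once per new bus generation
 *   ISO_RES_DEALLOC       client asked for release; resource is freed
 *   ISO_RES_ALLOC_ONCE,
 *   ISO_RES_DEALLOC_ONCE  one-shot (de)allocation without automatic
 *                         reallocation after bus resets
 */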
/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
        struct { void *data; size_t size; } v[2];
        struct list_head link;
};

struct bus_reset_event {
        struct event event;
        struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
        struct event event;
        struct client *client;
        struct outbound_transaction_resource r;
        struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
        struct event event;
        union {
                struct fw_cdev_event_request request;
                struct fw_cdev_event_request2 request2;
        } req;
};

struct iso_interrupt_event {
        struct event event;
        struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_interrupt_mc_event {
        struct event event;
        struct fw_cdev_event_iso_interrupt_mc interrupt;
};

struct iso_resource_event {
        struct event event;
        struct fw_cdev_event_iso_resource iso_resource;
};

struct outbound_phy_packet_event {
        struct event event;
        struct client *client;
        struct fw_packet p;
        struct fw_cdev_event_phy_packet phy_packet;
};

struct inbound_phy_packet_event {
        struct event event;
        struct fw_cdev_event_phy_packet phy_packet;
};
#ifdef CONFIG_COMPAT
static void __user *u64_to_uptr(u64 value)
{
        if (is_compat_task())
                return compat_ptr(value);
        else
                return (void __user *)(unsigned long)value;
}

static u64 uptr_to_u64(void __user *ptr)
{
        if (is_compat_task())
                return ptr_to_compat(ptr);
        else
                return (u64)(unsigned long)ptr;
}
#else
static inline void __user *u64_to_uptr(u64 value)
{
        return (void __user *)(unsigned long)value;
}

static inline u64 uptr_to_u64(void __user *ptr)
{
        return (u64)(unsigned long)ptr;
}
#endif /* CONFIG_COMPAT */
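
/*
 * Pointers in the cdev ABI structs travel as u64.  For a 32-bit process
 * on a 64-bit kernel they must go through compat_ptr(), since some 32-bit
 * ABIs do not simply zero-extend user pointers; hence the is_compat_task()
 * checks above.
 */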
static int fw_device_op_open(struct inode *inode, struct file *file)
{
        struct fw_device *device;
        struct client *client;

        device = fw_device_get_by_devt(inode->i_rdev);
        if (device == NULL)
                return -ENODEV;

        if (fw_device_is_shutdown(device)) {
                fw_device_put(device);
                return -ENODEV;
        }

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (client == NULL) {
                fw_device_put(device);
                return -ENOMEM;
        }

        client->device = device;
        spin_lock_init(&client->lock);
        idr_init(&client->resource_idr);
        INIT_LIST_HEAD(&client->event_list);
        init_waitqueue_head(&client->wait);
        init_waitqueue_head(&client->tx_flush_wait);
        INIT_LIST_HEAD(&client->phy_receiver_link);
        INIT_LIST_HEAD(&client->link);
        kref_init(&client->kref);

        file->private_data = client;

        return nonseekable_open(inode, file);
}
static void queue_event(struct client *client, struct event *event,
                        void *data0, size_t size0, void *data1, size_t size1)
{
        unsigned long flags;

        event->v[0].data = data0;
        event->v[0].size = size0;
        event->v[1].data = data1;
        event->v[1].size = size1;

        spin_lock_irqsave(&client->lock, flags);
        if (client->in_shutdown)
                kfree(event);
        else
                list_add_tail(&event->link, &client->event_list);
        spin_unlock_irqrestore(&client->lock, flags);

        wake_up_interruptible(&client->wait);
}
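
/*
 * An event is queued as up to two segments: v[0] normally points at the
 * fixed-size fw_cdev_event_* struct, v[1] at an optional variable-length
 * payload.  dequeue_event() copies the segments back-to-back into the
 * read(2) buffer, so one kmalloc'ed event carries header and payload
 * without an extra copy in between.
 */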
static int dequeue_event(struct client *client,
                         char __user *buffer, size_t count)
{
        struct event *event;
        size_t size, total;
        int i, ret;

        ret = wait_event_interruptible(client->wait,
                        !list_empty(&client->event_list) ||
                        fw_device_is_shutdown(client->device));
        if (ret < 0)
                return ret;

        if (list_empty(&client->event_list) &&
                       fw_device_is_shutdown(client->device))
                return -ENODEV;

        spin_lock_irq(&client->lock);
        event = list_first_entry(&client->event_list, struct event, link);
        list_del(&event->link);
        spin_unlock_irq(&client->lock);

        total = 0;
        for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
                size = min(event->v[i].size, count - total);
                if (copy_to_user(buffer + total, event->v[i].data, size)) {
                        ret = -EFAULT;
                        goto out;
                }
                total += size;
        }
        ret = total;

 out:
        kfree(event);

        return ret;
}
static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
                                 size_t count, loff_t *offset)
{
        struct client *client = file->private_data;

        return dequeue_event(client, buffer, count);
}
static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
                                 struct client *client)
{
        struct fw_card *card = client->device->card;

        spin_lock_irq(&card->lock);

        event->closure       = client->bus_reset_closure;
        event->type          = FW_CDEV_EVENT_BUS_RESET;
        event->generation    = client->device->generation;
        event->node_id       = client->device->node_id;
        event->local_node_id = card->local_node->node_id;
        event->bm_node_id    = card->bm_node_id;
        event->irm_node_id   = card->irm_node->node_id;
        event->root_node_id  = card->root_node->node_id;

        spin_unlock_irq(&card->lock);
}
static void for_each_client(struct fw_device *device,
                            void (*callback)(struct client *client))
{
        struct client *c;

        mutex_lock(&device->client_list_mutex);
        list_for_each_entry(c, &device->client_list, link)
                callback(c);
        mutex_unlock(&device->client_list_mutex);
}
static int schedule_reallocations(int id, void *p, void *data)
{
        schedule_if_iso_resource(p);

        return 0;
}
static void queue_bus_reset_event(struct client *client)
{
        struct bus_reset_event *e;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e == NULL)
                return;

        fill_bus_reset_event(&e->reset, client);

        queue_event(client, &e->event,
                    &e->reset, sizeof(e->reset), NULL, 0);

        spin_lock_irq(&client->lock);
        idr_for_each(&client->resource_idr, schedule_reallocations, client);
        spin_unlock_irq(&client->lock);
}
void fw_device_cdev_update(struct fw_device *device)
{
        for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
        wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
        for_each_client(device, wake_up_client);
}
union ioctl_arg {
        struct fw_cdev_get_info                 get_info;
        struct fw_cdev_send_request             send_request;
        struct fw_cdev_allocate                 allocate;
        struct fw_cdev_deallocate               deallocate;
        struct fw_cdev_send_response            send_response;
        struct fw_cdev_initiate_bus_reset       initiate_bus_reset;
        struct fw_cdev_add_descriptor           add_descriptor;
        struct fw_cdev_remove_descriptor        remove_descriptor;
        struct fw_cdev_create_iso_context       create_iso_context;
        struct fw_cdev_queue_iso                queue_iso;
        struct fw_cdev_start_iso                start_iso;
        struct fw_cdev_stop_iso                 stop_iso;
        struct fw_cdev_get_cycle_timer          get_cycle_timer;
        struct fw_cdev_allocate_iso_resource    allocate_iso_resource;
        struct fw_cdev_send_stream_packet       send_stream_packet;
        struct fw_cdev_get_cycle_timer2         get_cycle_timer2;
        struct fw_cdev_send_phy_packet          send_phy_packet;
        struct fw_cdev_receive_phy_packets      receive_phy_packets;
        struct fw_cdev_set_iso_channels         set_iso_channels;
        struct fw_cdev_flush_iso                flush_iso;
};
static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_info *a = &arg->get_info;
        struct fw_cdev_event_bus_reset bus_reset;
        unsigned long ret = 0;

        client->version = a->version;
        a->version = FW_CDEV_KERNEL_VERSION;
        a->card = client->device->card->index;

        down_read(&fw_device_rwsem);

        if (a->rom != 0) {
                size_t want = a->rom_length;
                size_t have = client->device->config_rom_length * 4;

                ret = copy_to_user(u64_to_uptr(a->rom),
                                   client->device->config_rom, min(want, have));
        }
        a->rom_length = client->device->config_rom_length * 4;

        up_read(&fw_device_rwsem);

        if (ret != 0)
                return -EFAULT;

        mutex_lock(&client->device->client_list_mutex);

        client->bus_reset_closure = a->bus_reset_closure;
        if (a->bus_reset != 0) {
                fill_bus_reset_event(&bus_reset, client);
                /* unaligned size of bus_reset is 36 bytes */
                ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
        }
        if (ret == 0 && list_empty(&client->link))
                list_add_tail(&client->link, &client->device->client_list);

        mutex_unlock(&client->device->client_list_mutex);

        return ret ? -EFAULT : 0;
}
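
/*
 * Note that this ioctl doubles as the ABI version handshake: the version
 * advertised by the client, stored above, later selects between
 * fw_cdev_event_request and fw_cdev_event_request2 in handle_request()
 * and enables the region_end field in ioctl_allocate().
 */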
static int add_client_resource(struct client *client,
                               struct client_resource *resource, gfp_t gfp_mask)
{
        bool preload = gfp_mask & __GFP_WAIT;
        unsigned long flags;
        int ret;

        if (preload)
                idr_preload(gfp_mask);
        spin_lock_irqsave(&client->lock, flags);

        if (client->in_shutdown)
                ret = -ECANCELED;
        else
                ret = idr_alloc(&client->resource_idr, resource, 0, 0,
                                GFP_NOWAIT);
        if (ret >= 0) {
                resource->handle = ret;
                client_get(client);
                schedule_if_iso_resource(resource);
        }

        spin_unlock_irqrestore(&client->lock, flags);
        if (preload)
                idr_preload_end();

        return ret < 0 ? ret : 0;
}
static int release_client_resource(struct client *client, u32 handle,
                                   client_resource_release_fn_t release,
                                   struct client_resource **return_resource)
{
        struct client_resource *resource;

        spin_lock_irq(&client->lock);
        if (client->in_shutdown)
                resource = NULL;
        else
                resource = idr_find(&client->resource_idr, handle);
        if (resource && resource->release == release)
                idr_remove(&client->resource_idr, handle);
        spin_unlock_irq(&client->lock);

        if (!(resource && resource->release == release))
                return -EINVAL;

        if (return_resource)
                *return_resource = resource;
        else
                resource->release(client, resource);

        client_put(client);

        return 0;
}
static void release_transaction(struct client *client,
                                struct client_resource *resource)
{
}
static void complete_transaction(struct fw_card *card, int rcode,
                                 void *payload, size_t length, void *data)
{
        struct outbound_transaction_event *e = data;
        struct fw_cdev_event_response *rsp = &e->response;
        struct client *client = e->client;
        unsigned long flags;

        if (length < rsp->length)
                rsp->length = length;
        if (rcode == RCODE_COMPLETE)
                memcpy(rsp->data, payload, rsp->length);

        spin_lock_irqsave(&client->lock, flags);
        idr_remove(&client->resource_idr, e->r.resource.handle);
        if (client->in_shutdown)
                wake_up(&client->tx_flush_wait);
        spin_unlock_irqrestore(&client->lock, flags);

        rsp->type = FW_CDEV_EVENT_RESPONSE;
        rsp->rcode = rcode;

        /*
         * In the case that sizeof(*rsp) doesn't align with the position of the
         * data, and the read is short, preserve an extra copy of the data
         * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
         * for short reads and some apps depended on it, this is both safe
         * and prudent for compatibility.
         */
        if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
                queue_event(client, &e->event, rsp, sizeof(*rsp),
                            rsp->data, rsp->length);
        else
                queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
                            NULL, 0);

        /* Drop the idr's reference */
        client_put(client);
}
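
/*
 * The tx_flush_wait handshake: fw_device_op_release() sets in_shutdown
 * and then sleeps until has_outbound_transactions() sees an empty idr.
 * complete_transaction() above removes its resource and, once shutdown
 * has begun, wakes the waiter, so the client cannot be torn down while
 * fw_send_request() callbacks are still outstanding.
 */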
static int init_request(struct client *client,
                        struct fw_cdev_send_request *request,
                        int destination_id, int speed)
{
        struct outbound_transaction_event *e;
        int ret;

        if (request->tcode != TCODE_STREAM_DATA &&
            (request->length > 4096 || request->length > 512 << speed))
                return -EIO;

        if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
            request->length < 4)
                return -EINVAL;

        e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        e->client = client;
        e->response.length = request->length;
        e->response.closure = request->closure;

        if (request->data &&
            copy_from_user(e->response.data,
                           u64_to_uptr(request->data), request->length)) {
                ret = -EFAULT;
                goto failed;
        }

        e->r.resource.release = release_transaction;
        ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
        if (ret < 0)
                goto failed;

        fw_send_request(client->device->card, &e->r.transaction,
                        request->tcode, destination_id, request->generation,
                        speed, request->offset, e->response.data,
                        request->length, complete_transaction, e);
        return 0;

 failed:
        kfree(e);

        return ret;
}
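
/*
 * The length checks in init_request() mirror IEEE 1394 limits: the
 * maximum asynchronous payload is 512 bytes at S100 and doubles with
 * each speed step (512 << speed), with 4096 bytes as the overall cap.
 */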
static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
        switch (arg->send_request.tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_WRITE_BLOCK_REQUEST:
        case TCODE_READ_QUADLET_REQUEST:
        case TCODE_READ_BLOCK_REQUEST:
        case TCODE_LOCK_MASK_SWAP:
        case TCODE_LOCK_COMPARE_SWAP:
        case TCODE_LOCK_FETCH_ADD:
        case TCODE_LOCK_LITTLE_ADD:
        case TCODE_LOCK_BOUNDED_ADD:
        case TCODE_LOCK_WRAP_ADD:
        case TCODE_LOCK_VENDOR_DEPENDENT:
                break;
        default:
                return -EINVAL;
        }

        return init_request(client, &arg->send_request, client->device->node_id,
                            client->device->max_speed);
}
static inline bool is_fcp_request(struct fw_request *request)
{
        return request == NULL;
}
static void release_request(struct client *client,
                            struct client_resource *resource)
{
        struct inbound_transaction_resource *r = container_of(resource,
                        struct inbound_transaction_resource, resource);

        if (is_fcp_request(r->request))
                kfree(r->data);
        else
                fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

        fw_card_put(r->card);
        kfree(r);
}
static void handle_request(struct fw_card *card, struct fw_request *request,
                           int tcode, int destination, int source,
                           int generation, unsigned long long offset,
                           void *payload, size_t length, void *callback_data)
{
        struct address_handler_resource *handler = callback_data;
        struct inbound_transaction_resource *r;
        struct inbound_transaction_event *e;
        size_t event_size0;
        void *fcp_frame = NULL;
        int ret;

        /* card may be different from handler->client->device->card */
        fw_card_get(card);

        r = kmalloc(sizeof(*r), GFP_ATOMIC);
        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (r == NULL || e == NULL)
                goto failed;

        r->card    = card;
        r->request = request;
        r->data    = payload;
        r->length  = length;

        if (is_fcp_request(request)) {
                /*
                 * FIXME: Let core-transaction.c manage a
                 * single reference-counted copy?
                 */
                fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
                if (fcp_frame == NULL)
                        goto failed;

                r->data = fcp_frame;
        }

        r->resource.release = release_request;
        ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
        if (ret < 0)
                goto failed;

        if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
                struct fw_cdev_event_request *req = &e->req.request;

                if (tcode & 0x10)
                        tcode = TCODE_LOCK_REQUEST;

                req->type       = FW_CDEV_EVENT_REQUEST;
                req->tcode      = tcode;
                req->offset     = offset;
                req->length     = length;
                req->handle     = r->resource.handle;
                req->closure    = handler->closure;
                event_size0     = sizeof(*req);
        } else {
                struct fw_cdev_event_request2 *req = &e->req.request2;

                req->type       = FW_CDEV_EVENT_REQUEST2;
                req->tcode      = tcode;
                req->offset     = offset;
                req->source_node_id = source;
                req->destination_node_id = destination;
                req->card       = card->index;
                req->generation = generation;
                req->length     = length;
                req->handle     = r->resource.handle;
                req->closure    = handler->closure;
                event_size0     = sizeof(*req);
        }

        queue_event(handler->client, &e->event,
                    &e->req, event_size0, r->data, length);
        return;

 failed:
        kfree(r);
        kfree(e);
        kfree(fcp_frame);

        if (!is_fcp_request(request))
                fw_send_response(card, request, RCODE_CONFLICT_ERROR);

        fw_card_put(card);
}
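
/*
 * FCP special case, as seen above: the core indicates an FCP register
 * write with request == NULL because no response is to be sent and the
 * payload buffer is reclaimed by the core when this handler returns;
 * hence the kmemdup() of the frame here and the kfree() instead of
 * fw_send_response() in release_request().
 */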
static void release_address_handler(struct client *client,
                                    struct client_resource *resource)
{
        struct address_handler_resource *r =
                container_of(resource, struct address_handler_resource, resource);

        fw_core_remove_address_handler(&r->handler);
        kfree(r);
}
static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_allocate *a = &arg->allocate;
        struct address_handler_resource *r;
        struct fw_address_region region;
        int ret;

        r = kmalloc(sizeof(*r), GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        region.start = a->offset;
        if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
                region.end = a->offset + a->length;
        else
                region.end = a->region_end;

        r->handler.length           = a->length;
        r->handler.address_callback = handle_request;
        r->handler.callback_data    = r;
        r->closure   = a->closure;
        r->client    = client;

        ret = fw_core_add_address_handler(&r->handler, &region);
        if (ret < 0) {
                kfree(r);
                return ret;
        }
        a->offset = r->handler.offset;

        r->resource.release = release_address_handler;
        ret = add_client_resource(client, &r->resource, GFP_KERNEL);
        if (ret < 0) {
                release_address_handler(client, &r->resource);
                return ret;
        }
        a->handle = r->resource.handle;

        return 0;
}
static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
        return release_client_resource(client, arg->deallocate.handle,
                                       release_address_handler, NULL);
}
static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_response *a = &arg->send_response;
        struct client_resource *resource;
        struct inbound_transaction_resource *r;
        int ret = 0;

        if (release_client_resource(client, a->handle,
                                    release_request, &resource) < 0)
                return -EINVAL;

        r = container_of(resource, struct inbound_transaction_resource,
                         resource);
        if (is_fcp_request(r->request))
                goto out;

        if (a->length != fw_get_response_length(r->request)) {
                ret = -EINVAL;
                kfree(r->request);
                goto out;
        }
        if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
                ret = -EFAULT;
                kfree(r->request);
                goto out;
        }
        fw_send_response(r->card, r->request, a->rcode);
 out:
        fw_card_put(r->card);
        kfree(r);

        return ret;
}
static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
        fw_schedule_bus_reset(client->device->card, true,
                        arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);

        return 0;
}
static void release_descriptor(struct client *client,
                               struct client_resource *resource)
{
        struct descriptor_resource *r =
                container_of(resource, struct descriptor_resource, resource);

        fw_core_remove_descriptor(&r->descriptor);
        kfree(r);
}
static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
        struct descriptor_resource *r;
        int ret;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        if (a->length > 256)
                return -EINVAL;

        r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
                ret = -EFAULT;
                goto failed;
        }

        r->descriptor.length    = a->length;
        r->descriptor.immediate = a->immediate;
        r->descriptor.key       = a->key;
        r->descriptor.data      = r->data;

        ret = fw_core_add_descriptor(&r->descriptor);
        if (ret < 0)
                goto failed;

        r->resource.release = release_descriptor;
        ret = add_client_resource(client, &r->resource, GFP_KERNEL);
        if (ret < 0) {
                fw_core_remove_descriptor(&r->descriptor);
                goto failed;
        }
        a->handle = r->resource.handle;

        return 0;
 failed:
        kfree(r);

        return ret;
}
static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
        return release_client_resource(client, arg->remove_descriptor.handle,
                                       release_descriptor, NULL);
}
static void iso_callback(struct fw_iso_context *context, u32 cycle,
                         size_t header_length, void *header, void *data)
{
        struct client *client = data;
        struct iso_interrupt_event *e;

        e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
        if (e == NULL)
                return;

        e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT;
        e->interrupt.closure   = client->iso_closure;
        e->interrupt.cycle     = cycle;
        e->interrupt.header_length = header_length;
        memcpy(e->interrupt.header, header, header_length);
        queue_event(client, &e->event, &e->interrupt,
                    sizeof(e->interrupt) + header_length, NULL, 0);
}
static void iso_mc_callback(struct fw_iso_context *context,
                            dma_addr_t completed, void *data)
{
        struct client *client = data;
        struct iso_interrupt_mc_event *e;

        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (e == NULL)
                return;

        e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
        e->interrupt.closure   = client->iso_closure;
        e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
                                                      completed);
        queue_event(client, &e->event, &e->interrupt,
                    sizeof(e->interrupt), NULL, 0);
}
static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
{
        if (context->type == FW_ISO_CONTEXT_TRANSMIT)
                return DMA_TO_DEVICE;
        else
                return DMA_FROM_DEVICE;
}
static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
        struct fw_iso_context *context;
        fw_iso_callback_t cb;
        int ret;

        BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
                     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE  ||
                     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
                                        FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);

        switch (a->type) {
        case FW_ISO_CONTEXT_TRANSMIT:
                if (a->speed > SCODE_3200 || a->channel > 63)
                        return -EINVAL;

                cb = iso_callback;
                break;

        case FW_ISO_CONTEXT_RECEIVE:
                if (a->header_size < 4 || (a->header_size & 3) ||
                    a->channel > 63)
                        return -EINVAL;

                cb = iso_callback;
                break;

        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                cb = (fw_iso_callback_t)iso_mc_callback;
                break;

        default:
                return -EINVAL;
        }

        context = fw_iso_context_create(client->device->card, a->type,
                        a->channel, a->speed, a->header_size, cb, client);
        if (IS_ERR(context))
                return PTR_ERR(context);

        /* We only support one context at this time. */
        spin_lock_irq(&client->lock);
        if (client->iso_context != NULL) {
                spin_unlock_irq(&client->lock);
                fw_iso_context_destroy(context);

                return -EBUSY;
        }
        if (!client->buffer_is_mapped) {
                ret = fw_iso_buffer_map_dma(&client->buffer,
                                            client->device->card,
                                            iso_dma_direction(context));
                if (ret < 0) {
                        spin_unlock_irq(&client->lock);
                        fw_iso_context_destroy(context);

                        return ret;
                }
                client->buffer_is_mapped = true;
        }
        client->iso_closure = a->closure;
        client->iso_context = context;
        spin_unlock_irq(&client->lock);

        a->handle = 0;

        return 0;
}
static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
        struct fw_iso_context *ctx = client->iso_context;

        if (ctx == NULL || a->handle != 0)
                return -EINVAL;

        return fw_iso_context_set_channels(ctx, &a->channels);
}
/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)   ((v) & 0xffff)
#define GET_INTERRUPT(v)        (((v) >> 16) & 0x01)
#define GET_SKIP(v)             (((v) >> 17) & 0x01)
#define GET_TAG(v)              (((v) >> 18) & 0x03)
#define GET_SY(v)               (((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)    (((v) >> 24) & 0xff)
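
/*
 * Layout of the fw_cdev_iso_packet control word decoded by the macros
 * above:
 *
 *   bits  0..15  payload_length
 *   bit      16  interrupt
 *   bit      17  skip
 *   bits 18..19  tag
 *   bits 20..23  sy
 *   bits 24..31  header_length
 */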
static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_queue_iso *a = &arg->queue_iso;
        struct fw_cdev_iso_packet __user *p, *end, *next;
        struct fw_iso_context *ctx = client->iso_context;
        unsigned long payload, buffer_end, transmit_header_bytes = 0;
        u32 control;
        int count;
        struct {
                struct fw_iso_packet packet;
                u8 header[256];
        } u;

        if (ctx == NULL || a->handle != 0)
                return -EINVAL;

        /*
         * If the user passes a non-NULL data pointer, has mmap()'ed
         * the iso buffer, and the pointer points inside the buffer,
         * we setup the payload pointers accordingly.  Otherwise we
         * set them both to 0, which will still let packets with
         * payload_length == 0 through.  In other words, if no packets
         * use the indirect payload, the iso buffer need not be mapped
         * and the a->data pointer is ignored.
         */
        payload = (unsigned long)a->data - client->vm_start;
        buffer_end = client->buffer.page_count << PAGE_SHIFT;
        if (a->data == 0 || client->buffer.pages == NULL ||
            payload >= buffer_end) {
                payload = 0;
                buffer_end = 0;
        }

        if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
                return -EINVAL;

        p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
        if (!access_ok(VERIFY_READ, p, a->size))
                return -EFAULT;

        end = (void __user *)p + a->size;
        count = 0;
        while (p < end) {
                if (get_user(control, &p->control))
                        return -EFAULT;
                u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
                u.packet.interrupt = GET_INTERRUPT(control);
                u.packet.skip = GET_SKIP(control);
                u.packet.tag = GET_TAG(control);
                u.packet.sy = GET_SY(control);
                u.packet.header_length = GET_HEADER_LENGTH(control);

                switch (ctx->type) {
                case FW_ISO_CONTEXT_TRANSMIT:
                        if (u.packet.header_length & 3)
                                return -EINVAL;
                        transmit_header_bytes = u.packet.header_length;
                        break;

                case FW_ISO_CONTEXT_RECEIVE:
                        if (u.packet.header_length == 0 ||
                            u.packet.header_length % ctx->header_size != 0)
                                return -EINVAL;
                        break;

                case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                        if (u.packet.payload_length == 0 ||
                            u.packet.payload_length & 3)
                                return -EINVAL;
                        break;
                }

                next = (struct fw_cdev_iso_packet __user *)
                        &p->header[transmit_header_bytes / 4];
                if (next > end)
                        return -EINVAL;
                if (__copy_from_user
                    (u.packet.header, p->header, transmit_header_bytes))
                        return -EFAULT;
                if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
                    u.packet.header_length + u.packet.payload_length > 0)
                        return -EINVAL;
                if (payload + u.packet.payload_length > buffer_end)
                        return -EINVAL;

                if (fw_iso_context_queue(ctx, &u.packet,
                                         &client->buffer, payload))
                        break;

                p = next;
                payload += u.packet.payload_length;
                count++;
        }
        fw_iso_context_queue_flush(ctx);

        a->size    -= uptr_to_u64(p) - a->packets;
        a->packets  = uptr_to_u64(p);
        a->data     = client->vm_start + payload;

        return count;
}
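
/*
 * Note the partial-queueing semantics above: if fw_iso_context_queue()
 * fails mid-way, a->size, a->packets, and a->data are rewritten to
 * describe only the packets that were not queued, presumably so that
 * userspace can resubmit the remainder of the same FW_CDEV_IOC_QUEUE_ISO
 * argument after draining completions.
 */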
static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_start_iso *a = &arg->start_iso;

        BUILD_BUG_ON(
            FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
            FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);

        if (client->iso_context == NULL || a->handle != 0)
                return -EINVAL;

        if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
            (a->tags == 0 || a->tags > 15 || a->sync > 15))
                return -EINVAL;

        return fw_iso_context_start(client->iso_context,
                                    a->cycle, a->sync, a->tags);
}
static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_stop_iso *a = &arg->stop_iso;

        if (client->iso_context == NULL || a->handle != 0)
                return -EINVAL;

        return fw_iso_context_stop(client->iso_context);
}
static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_flush_iso *a = &arg->flush_iso;

        if (client->iso_context == NULL || a->handle != 0)
                return -EINVAL;

        return fw_iso_context_flush_completions(client->iso_context);
}
static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
        struct fw_card *card = client->device->card;
        struct timespec ts = {0, 0};
        u32 cycle_time;
        int ret = 0;

        local_irq_disable();

        cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);

        switch (a->clk_id) {
        case CLOCK_REALTIME:      getnstimeofday(&ts);                   break;
        case CLOCK_MONOTONIC:     do_posix_clock_monotonic_gettime(&ts); break;
        case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts);                  break;
        default:
                ret = -EINVAL;
        }

        local_irq_enable();

        a->tv_sec      = ts.tv_sec;
        a->tv_nsec     = ts.tv_nsec;
        a->cycle_timer = cycle_time;

        return ret;
}
static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
        struct fw_cdev_get_cycle_timer2 ct2;

        ct2.clk_id = CLOCK_REALTIME;
        ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

        a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
        a->cycle_timer = ct2.cycle_timer;

        return 0;
}
static void iso_resource_work(struct work_struct *work)
{
        struct iso_resource_event *e;
        struct iso_resource *r =
                        container_of(work, struct iso_resource, work.work);
        struct client *client = r->client;
        int generation, channel, bandwidth, todo;
        bool skip, free, success;

        spin_lock_irq(&client->lock);
        generation = client->device->generation;
        todo = r->todo;
        /* Allow 1000ms grace period for other reallocations. */
        if (todo == ISO_RES_ALLOC &&
            time_before64(get_jiffies_64(),
                          client->device->card->reset_jiffies + HZ)) {
                schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
                skip = true;
        } else {
                /* We could be called twice within the same generation. */
                skip = todo == ISO_RES_REALLOC &&
                       r->generation == generation;
        }
        free = todo == ISO_RES_DEALLOC ||
               todo == ISO_RES_ALLOC_ONCE ||
               todo == ISO_RES_DEALLOC_ONCE;
        r->generation = generation;
        spin_unlock_irq(&client->lock);

        if (skip)
                goto out;

        bandwidth = r->bandwidth;

        fw_iso_resource_manage(client->device->card, generation,
                        r->channels, &channel, &bandwidth,
                        todo == ISO_RES_ALLOC ||
                        todo == ISO_RES_REALLOC ||
                        todo == ISO_RES_ALLOC_ONCE);
        /*
         * Is this generation outdated already?  As long as this resource sticks
         * in the idr, it will be scheduled again for a newer generation or at
         * shutdown.
         */
        if (channel == -EAGAIN &&
            (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
                goto out;

        success = channel >= 0 || bandwidth > 0;

        spin_lock_irq(&client->lock);
        /*
         * Transit from allocation to reallocation, except if the client
         * requested deallocation in the meantime.
         */
        if (r->todo == ISO_RES_ALLOC)
                r->todo = ISO_RES_REALLOC;
        /*
         * Allocation or reallocation failure?  Pull this resource out of the
         * idr and prepare for deletion, unless the client is shutting down.
         */
        if (r->todo == ISO_RES_REALLOC && !success &&
            !client->in_shutdown &&
            idr_find(&client->resource_idr, r->resource.handle)) {
                idr_remove(&client->resource_idr, r->resource.handle);
                client_put(client);
                free = true;
        }
        spin_unlock_irq(&client->lock);

        if (todo == ISO_RES_ALLOC && channel >= 0)
                r->channels = 1ULL << channel;

        if (todo == ISO_RES_REALLOC && success)
                goto out;

        if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
                e = r->e_alloc;
                r->e_alloc = NULL;
        } else {
                e = r->e_dealloc;
                r->e_dealloc = NULL;
        }
        e->iso_resource.handle    = r->resource.handle;
        e->iso_resource.channel   = channel;
        e->iso_resource.bandwidth = bandwidth;

        queue_event(client, &e->event,
                    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

        if (free) {
                cancel_delayed_work(&r->work);
                kfree(r->e_alloc);
                kfree(r->e_dealloc);
                kfree(r);
        }
 out:
        client_put(client);
}
static void release_iso_resource(struct client *client,
                                 struct client_resource *resource)
{
        struct iso_resource *r =
                container_of(resource, struct iso_resource, resource);

        spin_lock_irq(&client->lock);
        r->todo = ISO_RES_DEALLOC;
        schedule_iso_resource(r, 0);
        spin_unlock_irq(&client->lock);
}
static int init_iso_resource(struct client *client,
                struct fw_cdev_allocate_iso_resource *request, int todo)
{
        struct iso_resource_event *e1, *e2;
        struct iso_resource *r;
        int ret;

        if ((request->channels == 0 && request->bandwidth == 0) ||
            request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
                return -EINVAL;

        r  = kmalloc(sizeof(*r), GFP_KERNEL);
        e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
        e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
        if (r == NULL || e1 == NULL || e2 == NULL) {
                ret = -ENOMEM;
                goto fail;
        }

        INIT_DELAYED_WORK(&r->work, iso_resource_work);
        r->client    = client;
        r->todo      = todo;
        r->generation = -1;
        r->channels  = request->channels;
        r->bandwidth = request->bandwidth;
        r->e_alloc   = e1;
        r->e_dealloc = e2;

        e1->iso_resource.closure = request->closure;
        e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
        e2->iso_resource.closure = request->closure;
        e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

        if (todo == ISO_RES_ALLOC) {
                r->resource.release = release_iso_resource;
                ret = add_client_resource(client, &r->resource, GFP_KERNEL);
                if (ret < 0)
                        goto fail;
        } else {
                r->resource.release = NULL;
                r->resource.handle = -1;
                schedule_iso_resource(r, 0);
        }
        request->handle = r->resource.handle;

        return 0;
 fail:
        kfree(r);
        kfree(e1);
        kfree(e2);

        return ret;
}
static int ioctl_allocate_iso_resource(struct client *client,
                                       union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_ALLOC);
}

static int ioctl_deallocate_iso_resource(struct client *client,
                                         union ioctl_arg *arg)
{
        return release_client_resource(client,
                        arg->deallocate.handle, release_iso_resource, NULL);
}

static int ioctl_allocate_iso_resource_once(struct client *client,
                                            union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}

static int ioctl_deallocate_iso_resource_once(struct client *client,
                                              union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}
/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
        return client->device->max_speed;
}
static int ioctl_send_broadcast_request(struct client *client,
                                        union ioctl_arg *arg)
{
        struct fw_cdev_send_request *a = &arg->send_request;

        switch (a->tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_WRITE_BLOCK_REQUEST:
                break;
        default:
                return -EINVAL;
        }

        /* Security policy: Only allow accesses to Units Space. */
        if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
                return -EACCES;

        return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}
static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
        struct fw_cdev_send_request request;
        int dest;

        if (a->speed > client->device->card->link_speed ||
            a->length > 1024 << a->speed)
                return -EIO;

        if (a->tag > 3 || a->channel > 63 || a->sy > 15)
                return -EINVAL;

        dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
        request.tcode           = TCODE_STREAM_DATA;
        request.length          = a->length;
        request.closure         = a->closure;
        request.data            = a->data;
        request.generation      = a->generation;

        return init_request(client, &request, dest, a->speed);
}
static void outbound_phy_packet_callback(struct fw_packet *packet,
                                         struct fw_card *card, int status)
{
        struct outbound_phy_packet_event *e =
                container_of(packet, struct outbound_phy_packet_event, p);

        switch (status) {
        /* expected: */
        case ACK_COMPLETE:      e->phy_packet.rcode = RCODE_COMPLETE;   break;
        /* should never happen with PHY packets: */
        case ACK_PENDING:       e->phy_packet.rcode = RCODE_COMPLETE;   break;
        case ACK_BUSY_X:
        case ACK_BUSY_A:
        case ACK_BUSY_B:        e->phy_packet.rcode = RCODE_BUSY;       break;
        case ACK_DATA_ERROR:    e->phy_packet.rcode = RCODE_DATA_ERROR; break;
        case ACK_TYPE_ERROR:    e->phy_packet.rcode = RCODE_TYPE_ERROR; break;
        /* stale generation; cancelled; on certain controllers: no ack */
        default:                e->phy_packet.rcode = status;           break;
        }
        e->phy_packet.data[0] = packet->timestamp;

        queue_event(e->client, &e->event, &e->phy_packet,
                    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
        client_put(e->client);
}
static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
        struct fw_card *card = client->device->card;
        struct outbound_phy_packet_event *e;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        client_get(client);
        e->client               = client;
        e->p.speed              = SCODE_100;
        e->p.generation         = a->generation;
        e->p.header[0]          = TCODE_LINK_INTERNAL << 4;
        e->p.header[1]          = a->data[0];
        e->p.header[2]          = a->data[1];
        e->p.header_length      = 12;
        e->p.callback           = outbound_phy_packet_callback;
        e->phy_packet.closure   = a->closure;
        e->phy_packet.type      = FW_CDEV_EVENT_PHY_PACKET_SENT;
        if (is_ping_packet(a->data))
                e->phy_packet.length = 4;

        card->driver->send_request(card, &e->p);

        return 0;
}
static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
        struct fw_card *card = client->device->card;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        spin_lock_irq(&card->lock);

        list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
        client->phy_receiver_closure = a->closure;

        spin_unlock_irq(&card->lock);

        return 0;
}
void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
        struct client *client;
        struct inbound_phy_packet_event *e;
        unsigned long flags;

        spin_lock_irqsave(&card->lock, flags);

        list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
                e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
                if (e == NULL)
                        break;

                e->phy_packet.closure   = client->phy_receiver_closure;
                e->phy_packet.type      = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
                e->phy_packet.rcode     = RCODE_COMPLETE;
                e->phy_packet.length    = 8;
                e->phy_packet.data[0]   = p->header[1];
                e->phy_packet.data[1]   = p->header[2];
                queue_event(client, &e->event,
                            &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
        }

        spin_unlock_irqrestore(&card->lock, flags);
}
static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
        [0x00] = ioctl_get_info,
        [0x01] = ioctl_send_request,
        [0x02] = ioctl_allocate,
        [0x03] = ioctl_deallocate,
        [0x04] = ioctl_send_response,
        [0x05] = ioctl_initiate_bus_reset,
        [0x06] = ioctl_add_descriptor,
        [0x07] = ioctl_remove_descriptor,
        [0x08] = ioctl_create_iso_context,
        [0x09] = ioctl_queue_iso,
        [0x0a] = ioctl_start_iso,
        [0x0b] = ioctl_stop_iso,
        [0x0c] = ioctl_get_cycle_timer,
        [0x0d] = ioctl_allocate_iso_resource,
        [0x0e] = ioctl_deallocate_iso_resource,
        [0x0f] = ioctl_allocate_iso_resource_once,
        [0x10] = ioctl_deallocate_iso_resource_once,
        [0x11] = ioctl_get_speed,
        [0x12] = ioctl_send_broadcast_request,
        [0x13] = ioctl_send_stream_packet,
        [0x14] = ioctl_get_cycle_timer2,
        [0x15] = ioctl_send_phy_packet,
        [0x16] = ioctl_receive_phy_packets,
        [0x17] = ioctl_set_iso_channels,
        [0x18] = ioctl_flush_iso,
};
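
/*
 * The table index is _IOC_NR(cmd).  The FW_CDEV_IOC_* numbers in
 * linux/firewire-cdev.h are encoded with type '#' and sequential NR
 * fields, along the lines of (illustrative; see the header for the
 * authoritative definitions):
 *
 *   #define FW_CDEV_IOC_GET_INFO _IOWR('#', 0x00, struct fw_cdev_get_info)
 *
 * dispatch_ioctl() below validates type, NR, and size before indexing.
 */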
static int dispatch_ioctl(struct client *client,
                          unsigned int cmd, void __user *arg)
{
        union ioctl_arg buffer;
        int ret;

        if (fw_device_is_shutdown(client->device))
                return -ENODEV;

        if (_IOC_TYPE(cmd) != '#' ||
            _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
            _IOC_SIZE(cmd) > sizeof(buffer))
                return -ENOTTY;

        if (_IOC_DIR(cmd) == _IOC_READ)
                memset(&buffer, 0, _IOC_SIZE(cmd));

        if (_IOC_DIR(cmd) & _IOC_WRITE)
                if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
                        return -EFAULT;

        ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
        if (ret < 0)
                return ret;

        if (_IOC_DIR(cmd) & _IOC_READ)
                if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
                        return -EFAULT;

        return ret;
}
static long fw_device_op_ioctl(struct file *file,
                               unsigned int cmd, unsigned long arg)
{
        return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}
#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
                                      unsigned int cmd, unsigned long arg)
{
        return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct client *client = file->private_data;
        unsigned long size;
        int page_count, ret;

        if (fw_device_is_shutdown(client->device))
                return -ENODEV;

        /* FIXME: We could support multiple buffers, but we don't. */
        if (client->buffer.pages != NULL)
                return -EBUSY;

        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        if (vma->vm_start & ~PAGE_MASK)
                return -EINVAL;

        client->vm_start = vma->vm_start;
        size = vma->vm_end - vma->vm_start;
        page_count = size >> PAGE_SHIFT;
        if (size & ~PAGE_MASK)
                return -EINVAL;

        ret = fw_iso_buffer_alloc(&client->buffer, page_count);
        if (ret < 0)
                return ret;

        spin_lock_irq(&client->lock);
        if (client->iso_context) {
                ret = fw_iso_buffer_map_dma(&client->buffer,
                                client->device->card,
                                iso_dma_direction(client->iso_context));
                client->buffer_is_mapped = (ret == 0);
        }
        spin_unlock_irq(&client->lock);
        if (ret < 0)
                goto fail;

        ret = fw_iso_buffer_map_vma(&client->buffer, vma);
        if (ret < 0)
                goto fail;

        return 0;
 fail:
        fw_iso_buffer_destroy(&client->buffer, client->device->card);
        return ret;
}
static int is_outbound_transaction_resource(int id, void *p, void *data)
{
        struct client_resource *resource = p;

        return resource->release == release_transaction;
}

static int has_outbound_transactions(struct client *client)
{
        int ret;

        spin_lock_irq(&client->lock);
        ret = idr_for_each(&client->resource_idr,
                           is_outbound_transaction_resource, NULL);
        spin_unlock_irq(&client->lock);

        return ret;
}
static int shutdown_resource(int id, void *p, void *data)
{
        struct client_resource *resource = p;
        struct client *client = data;

        resource->release(client, resource);
        client_put(client);

        return 0;
}
static int fw_device_op_release(struct inode *inode, struct file *file)
{
        struct client *client = file->private_data;
        struct event *event, *next_event;

        spin_lock_irq(&client->device->card->lock);
        list_del(&client->phy_receiver_link);
        spin_unlock_irq(&client->device->card->lock);

        mutex_lock(&client->device->client_list_mutex);
        list_del(&client->link);
        mutex_unlock(&client->device->client_list_mutex);

        if (client->iso_context)
                fw_iso_context_destroy(client->iso_context);

        if (client->buffer.pages)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        /* Freeze client->resource_idr and client->event_list */
        spin_lock_irq(&client->lock);
        client->in_shutdown = true;
        spin_unlock_irq(&client->lock);

        wait_event(client->tx_flush_wait, !has_outbound_transactions(client));

        idr_for_each(&client->resource_idr, shutdown_resource, client);
        idr_destroy(&client->resource_idr);

        list_for_each_entry_safe(event, next_event, &client->event_list, link)
                kfree(event);

        client_put(client);

        return 0;
}
static unsigned int fw_device_op_poll(struct file *file, poll_table *pt)
{
        struct client *client = file->private_data;
        unsigned int mask = 0;

        poll_wait(file, &client->wait, pt);

        if (fw_device_is_shutdown(client->device))
                mask |= POLLHUP | POLLERR;
        if (!list_empty(&client->event_list))
                mask |= POLLIN | POLLRDNORM;

        return mask;
}
const struct file_operations fw_device_ops = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .open           = fw_device_op_open,
        .read           = fw_device_op_read,
        .unlocked_ioctl = fw_device_op_ioctl,
        .mmap           = fw_device_op_mmap,
        .release        = fw_device_op_release,
        .poll           = fw_device_op_poll,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = fw_device_op_compat_ioctl,
#endif
};