/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include <asm/system.h>

#include "core.h"
/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */
#define FW_CDEV_KERNEL_VERSION			4
#define FW_CDEV_VERSION_EVENT_REQUEST2		4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END	4
struct client {
        u32 version;
        struct fw_device *device;

        spinlock_t lock;
        bool in_shutdown;
        struct idr resource_idr;
        struct list_head event_list;
        wait_queue_head_t wait;
        u64 bus_reset_closure;

        struct fw_iso_context *iso_context;
        u64 iso_closure;
        struct fw_iso_buffer buffer;
        unsigned long vm_start;

        struct list_head phy_receiver_link;
        u64 phy_receiver_closure;

        struct list_head link;
        struct kref kref;
};
static inline void client_get(struct client *client)
{
        kref_get(&client->kref);
}
static void client_release(struct kref *kref)
{
        struct client *client = container_of(kref, struct client, kref);

        fw_device_put(client->device);
        kfree(client);
}
static void client_put(struct client *client)
{
        kref_put(&client->kref, client_release);
}
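
/*
 * Reference counting of struct client, as implemented above and below:
 * the open file itself holds one reference (dropped in
 * fw_device_op_release()), each resource registered in
 * client->resource_idr holds one (taken in add_client_resource()), and
 * each in-flight outbound transaction callback and each scheduled
 * iso_resource work item holds one for the duration of the callback.
 */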
struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
                                             struct client_resource *);
struct client_resource {
        client_resource_release_fn_t release;
        int handle;
};
struct address_handler_resource {
        struct client_resource resource;
        struct fw_address_handler handler;
        __u64 closure;
        struct client *client;
};
struct outbound_transaction_resource {
        struct client_resource resource;
        struct fw_transaction transaction;
};
struct inbound_transaction_resource {
        struct client_resource resource;
        struct fw_card *card;
        struct fw_request *request;
        void *data;
        size_t length;
};
struct descriptor_resource {
        struct client_resource resource;
        struct fw_descriptor descriptor;
        u32 data[0];
};
struct iso_resource {
        struct client_resource resource;
        struct client *client;
        /* Schedule work and access todo only with client->lock held. */
        struct delayed_work work;
        enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
              ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
        int generation;
        u64 channels;
        s32 bandwidth;
        __be32 transaction_data[2];
        struct iso_resource_event *e_alloc, *e_dealloc;
};
static void release_iso_resource(struct client *, struct client_resource *);
static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
        client_get(r->client);
        if (!schedule_delayed_work(&r->work, delay))
                client_put(r->client);
}
static void schedule_if_iso_resource(struct client_resource *resource)
{
        if (resource->release == release_iso_resource)
                schedule_iso_resource(container_of(resource,
                                        struct iso_resource, resource), 0);
}
/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
        struct { void *data; size_t size; } v[2];
        struct list_head link;
};
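
/*
 * An event is handed to user space as up to two chunks: v[0] points at
 * the fixed-size struct fw_cdev_event_* record, v[1] at an optional
 * variable-length payload.  dequeue_event() copies both back-to-back
 * into the caller's read() buffer.
 */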
struct bus_reset_event {
        struct event event;
        struct fw_cdev_event_bus_reset reset;
};
struct outbound_transaction_event {
        struct event event;
        struct client *client;
        struct outbound_transaction_resource r;
        struct fw_cdev_event_response response;
};
struct inbound_transaction_event {
        struct event event;
        union {
                struct fw_cdev_event_request request;
                struct fw_cdev_event_request2 request2;
        } req;
};
struct iso_interrupt_event {
        struct event event;
        struct fw_cdev_event_iso_interrupt interrupt;
};
struct iso_interrupt_mc_event {
        struct event event;
        struct fw_cdev_event_iso_interrupt_mc interrupt;
};
struct iso_resource_event {
        struct event event;
        struct fw_cdev_event_iso_resource iso_resource;
};
struct outbound_phy_packet_event {
        struct event event;
        struct client *client;
        struct fw_packet p;
        struct fw_cdev_event_phy_packet phy_packet;
};
struct inbound_phy_packet_event {
        struct event event;
        struct fw_cdev_event_phy_packet phy_packet;
};
static inline void __user *u64_to_uptr(__u64 value)
{
        return (void __user *)(unsigned long)value;
}
static inline __u64 uptr_to_u64(void __user *ptr)
{
        return (__u64)(unsigned long)ptr;
}
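
/*
 * The character device ABI carries user-space pointers as __u64 so that
 * one binary layout serves 32-bit and 64-bit processes alike; the two
 * helpers above convert between the representations (see also the
 * compat_ioctl path near the end of this file).
 */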
static int fw_device_op_open(struct inode *inode, struct file *file)
{
        struct fw_device *device;
        struct client *client;

        device = fw_device_get_by_devt(inode->i_rdev);
        if (device == NULL)
                return -ENODEV;

        if (fw_device_is_shutdown(device)) {
                fw_device_put(device);
                return -ENODEV;
        }

        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (client == NULL) {
                fw_device_put(device);
                return -ENOMEM;
        }

        client->device = device;
        spin_lock_init(&client->lock);
        idr_init(&client->resource_idr);
        INIT_LIST_HEAD(&client->event_list);
        init_waitqueue_head(&client->wait);
        INIT_LIST_HEAD(&client->phy_receiver_link);
        kref_init(&client->kref);

        file->private_data = client;

        mutex_lock(&device->client_list_mutex);
        list_add_tail(&client->link, &device->client_list);
        mutex_unlock(&device->client_list_mutex);

        return nonseekable_open(inode, file);
}
static void queue_event(struct client *client, struct event *event,
                        void *data0, size_t size0, void *data1, size_t size1)
{
        unsigned long flags;

        event->v[0].data = data0;
        event->v[0].size = size0;
        event->v[1].data = data1;
        event->v[1].size = size1;

        spin_lock_irqsave(&client->lock, flags);
        if (client->in_shutdown)
                kfree(event);
        else
                list_add_tail(&event->link, &client->event_list);
        spin_unlock_irqrestore(&client->lock, flags);

        wake_up_interruptible(&client->wait);
}
static int dequeue_event(struct client *client,
                         char __user *buffer, size_t count)
{
        struct event *event;
        size_t size, total;
        int i, ret;

        ret = wait_event_interruptible(client->wait,
                        !list_empty(&client->event_list) ||
                        fw_device_is_shutdown(client->device));
        if (ret < 0)
                return ret;

        if (list_empty(&client->event_list) &&
                       fw_device_is_shutdown(client->device))
                return -ENODEV;

        spin_lock_irq(&client->lock);
        event = list_first_entry(&client->event_list, struct event, link);
        list_del(&event->link);
        spin_unlock_irq(&client->lock);

        total = 0;
        for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
                size = min(event->v[i].size, count - total);
                if (copy_to_user(buffer + total, event->v[i].data, size)) {
                        ret = -EFAULT;
                        goto out;
                }
                total += size;
        }
        ret = total;

 out:
        kfree(event);

        return ret;
}
static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
                                 size_t count, loff_t *offset)
{
        struct client *client = file->private_data;

        return dequeue_event(client, buffer, count);
}
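
/*
 * A rough sketch of the user-space side of this read() interface
 * (illustrative only, not part of this file; error handling omitted,
 * buffer size chosen arbitrarily):
 *
 *	union {
 *		struct fw_cdev_event_common common;
 *		char buf[4096];
 *	} u;
 *
 *	while (read(fd, &u, sizeof(u)) > 0) {
 *		switch (u.common.type) {
 *		case FW_CDEV_EVENT_BUS_RESET:
 *			// cast to struct fw_cdev_event_bus_reset
 *			break;
 *		case FW_CDEV_EVENT_RESPONSE:
 *			// struct fw_cdev_event_response plus payload
 *			break;
 *		}
 *	}
 */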
static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
                                 struct client *client)
{
        struct fw_card *card = client->device->card;

        spin_lock_irq(&card->lock);

        event->closure       = client->bus_reset_closure;
        event->type          = FW_CDEV_EVENT_BUS_RESET;
        event->generation    = client->device->generation;
        event->node_id       = client->device->node_id;
        event->local_node_id = card->local_node->node_id;
        event->bm_node_id    = card->bm_node_id;
        event->irm_node_id   = card->irm_node->node_id;
        event->root_node_id  = card->root_node->node_id;

        spin_unlock_irq(&card->lock);
}
static void for_each_client(struct fw_device *device,
                            void (*callback)(struct client *client))
{
        struct client *c;

        mutex_lock(&device->client_list_mutex);
        list_for_each_entry(c, &device->client_list, link)
                callback(c);
        mutex_unlock(&device->client_list_mutex);
}
static int schedule_reallocations(int id, void *p, void *data)
{
        schedule_if_iso_resource(p);

        return 0;
}
static void queue_bus_reset_event(struct client *client)
{
        struct bus_reset_event *e;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (e == NULL) {
                fw_notify("Out of memory when allocating event\n");
                return;
        }

        fill_bus_reset_event(&e->reset, client);

        queue_event(client, &e->event,
                    &e->reset, sizeof(e->reset), NULL, 0);

        spin_lock_irq(&client->lock);
        idr_for_each(&client->resource_idr, schedule_reallocations, client);
        spin_unlock_irq(&client->lock);
}
void fw_device_cdev_update(struct fw_device *device)
{
        for_each_client(device, queue_bus_reset_event);
}
static void wake_up_client(struct client *client)
{
        wake_up_interruptible(&client->wait);
}
void fw_device_cdev_remove(struct fw_device *device)
{
        for_each_client(device, wake_up_client);
}
union ioctl_arg {
        struct fw_cdev_get_info                 get_info;
        struct fw_cdev_send_request             send_request;
        struct fw_cdev_allocate                 allocate;
        struct fw_cdev_deallocate               deallocate;
        struct fw_cdev_send_response            send_response;
        struct fw_cdev_initiate_bus_reset       initiate_bus_reset;
        struct fw_cdev_add_descriptor           add_descriptor;
        struct fw_cdev_remove_descriptor        remove_descriptor;
        struct fw_cdev_create_iso_context       create_iso_context;
        struct fw_cdev_queue_iso                queue_iso;
        struct fw_cdev_start_iso                start_iso;
        struct fw_cdev_stop_iso                 stop_iso;
        struct fw_cdev_get_cycle_timer          get_cycle_timer;
        struct fw_cdev_allocate_iso_resource    allocate_iso_resource;
        struct fw_cdev_send_stream_packet       send_stream_packet;
        struct fw_cdev_get_cycle_timer2         get_cycle_timer2;
        struct fw_cdev_send_phy_packet          send_phy_packet;
        struct fw_cdev_receive_phy_packets      receive_phy_packets;
        struct fw_cdev_set_iso_channels         set_iso_channels;
};
static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_info *a = &arg->get_info;
        struct fw_cdev_event_bus_reset bus_reset;
        unsigned long ret = 0;

        client->version = a->version;
        a->version = FW_CDEV_KERNEL_VERSION;
        a->card = client->device->card->index;

        down_read(&fw_device_rwsem);

        if (a->rom != 0) {
                size_t want = a->rom_length;
                size_t have = client->device->config_rom_length * 4;

                ret = copy_to_user(u64_to_uptr(a->rom),
                                   client->device->config_rom, min(want, have));
        }
        a->rom_length = client->device->config_rom_length * 4;

        up_read(&fw_device_rwsem);

        if (ret != 0)
                return -EFAULT;

        client->bus_reset_closure = a->bus_reset_closure;
        if (a->bus_reset != 0) {
                fill_bus_reset_event(&bus_reset, client);
                if (copy_to_user(u64_to_uptr(a->bus_reset),
                                 &bus_reset, sizeof(bus_reset)))
                        return -EFAULT;
        }

        return 0;
}
static int add_client_resource(struct client *client,
                               struct client_resource *resource, gfp_t gfp_mask)
{
        unsigned long flags;
        int ret;

 retry:
        if (idr_pre_get(&client->resource_idr, gfp_mask) == 0)
                return -ENOMEM;

        spin_lock_irqsave(&client->lock, flags);
        if (client->in_shutdown)
                ret = -ECANCELED;
        else
                ret = idr_get_new(&client->resource_idr, resource,
                                  &resource->handle);
        if (ret >= 0) {
                client_get(client);
                schedule_if_iso_resource(resource);
        }
        spin_unlock_irqrestore(&client->lock, flags);

        if (ret == -EAGAIN)
                goto retry;

        return ret < 0 ? ret : 0;
}
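
/*
 * Note the two-step idr protocol above: idr_pre_get() preallocates
 * memory outside the spinlock, idr_get_new() consumes it under
 * client->lock and can still return -EAGAIN if another path used up
 * the preallocation first, in which case we simply retry.
 */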
static int release_client_resource(struct client *client, u32 handle,
                                   client_resource_release_fn_t release,
                                   struct client_resource **return_resource)
{
        struct client_resource *resource;

        spin_lock_irq(&client->lock);
        if (client->in_shutdown)
                resource = NULL;
        else
                resource = idr_find(&client->resource_idr, handle);
        if (resource && resource->release == release)
                idr_remove(&client->resource_idr, handle);
        spin_unlock_irq(&client->lock);

        if (!(resource && resource->release == release))
                return -EINVAL;

        if (return_resource)
                *return_resource = resource;
        else
                resource->release(client, resource);

        client_put(client);

        return 0;
}
static void release_transaction(struct client *client,
                                struct client_resource *resource)
{
        struct outbound_transaction_resource *r = container_of(resource,
                        struct outbound_transaction_resource, resource);

        fw_cancel_transaction(client->device->card, &r->transaction);
}
static void complete_transaction(struct fw_card *card, int rcode,
                                 void *payload, size_t length, void *data)
{
        struct outbound_transaction_event *e = data;
        struct fw_cdev_event_response *rsp = &e->response;
        struct client *client = e->client;
        unsigned long flags;

        if (length < rsp->length)
                rsp->length = length;
        if (rcode == RCODE_COMPLETE)
                memcpy(rsp->data, payload, rsp->length);

        spin_lock_irqsave(&client->lock, flags);
        /*
         * 1. If called while in shutdown, the idr tree must be left untouched.
         *    The idr handle will be removed and the client reference will be
         *    dropped later.
         * 2. If the call chain was release_client_resource ->
         *    release_transaction -> complete_transaction (instead of a normal
         *    conclusion of the transaction), i.e. if this resource was already
         *    unregistered from the idr, the client reference will be dropped
         *    by release_client_resource and we must not drop it here.
         */
        if (!client->in_shutdown &&
            idr_find(&client->resource_idr, e->r.resource.handle)) {
                idr_remove(&client->resource_idr, e->r.resource.handle);
                /* Drop the idr's reference */
                client_put(client);
        }
        spin_unlock_irqrestore(&client->lock, flags);

        rsp->type = FW_CDEV_EVENT_RESPONSE;
        rsp->rcode = rcode;

        /*
         * In the case that sizeof(*rsp) doesn't align with the position of the
         * data, and the read is short, preserve an extra copy of the data
         * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
         * for short reads and some apps depended on it, this is both safe
         * and prudent for compatibility.
         */
        if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
                queue_event(client, &e->event, rsp, sizeof(*rsp),
                            rsp->data, rsp->length);
        else
                queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
                            NULL, 0);

        /* Drop the transaction callback's reference */
        client_put(client);
}
static int init_request(struct client *client,
                        struct fw_cdev_send_request *request,
                        int destination_id, int speed)
{
        struct outbound_transaction_event *e;
        int ret;

        if (request->tcode != TCODE_STREAM_DATA &&
            (request->length > 4096 || request->length > 512 << speed))
                return -EIO;

        if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
            request->length < 4)
                return -EINVAL;

        e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        e->client = client;
        e->response.length = request->length;
        e->response.closure = request->closure;

        if (request->data &&
            copy_from_user(e->response.data,
                           u64_to_uptr(request->data), request->length)) {
                ret = -EFAULT;
                goto failed;
        }

        e->r.resource.release = release_transaction;
        ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
        if (ret < 0)
                goto failed;

        /* Get a reference for the transaction callback */
        client_get(client);

        fw_send_request(client->device->card, &e->r.transaction,
                        request->tcode, destination_id, request->generation,
                        speed, request->offset, e->response.data,
                        request->length, complete_transaction, e);
        return 0;

 failed:
        kfree(e);

        return ret;
}
static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
        switch (arg->send_request.tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_WRITE_BLOCK_REQUEST:
        case TCODE_READ_QUADLET_REQUEST:
        case TCODE_READ_BLOCK_REQUEST:
        case TCODE_LOCK_MASK_SWAP:
        case TCODE_LOCK_COMPARE_SWAP:
        case TCODE_LOCK_FETCH_ADD:
        case TCODE_LOCK_LITTLE_ADD:
        case TCODE_LOCK_BOUNDED_ADD:
        case TCODE_LOCK_WRAP_ADD:
        case TCODE_LOCK_VENDOR_DEPENDENT:
                break;
        default:
                return -EINVAL;
        }

        return init_request(client, &arg->send_request, client->device->node_id,
                            client->device->max_speed);
}
static inline bool is_fcp_request(struct fw_request *request)
{
        return request == NULL;
}
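
/*
 * The transaction core hands us a NULL fw_request for writes to the FCP
 * command and response registers; for those, no response subaction is
 * owed by user space, so only a copy of the payload is delivered (hence
 * the kfree()/fw_send_response() split in release_request() below).
 */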
static void release_request(struct client *client,
                            struct client_resource *resource)
{
        struct inbound_transaction_resource *r = container_of(resource,
                        struct inbound_transaction_resource, resource);

        if (is_fcp_request(r->request))
                kfree(r->data);
        else
                fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

        fw_card_put(r->card);
        kfree(r);
}
static void handle_request(struct fw_card *card, struct fw_request *request,
                           int tcode, int destination, int source,
                           int generation, unsigned long long offset,
                           void *payload, size_t length, void *callback_data)
{
        struct address_handler_resource *handler = callback_data;
        struct inbound_transaction_resource *r;
        struct inbound_transaction_event *e;
        size_t event_size0;
        void *fcp_frame = NULL;
        int ret;

        /* card may be different from handler->client->device->card */
        fw_card_get(card);

        r = kmalloc(sizeof(*r), GFP_ATOMIC);
        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (r == NULL || e == NULL) {
                fw_notify("Out of memory when allocating event\n");
                goto failed;
        }
        r->card    = card;
        r->request = request;
        r->data    = payload;
        r->length  = length;

        if (is_fcp_request(request)) {
                fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
                if (fcp_frame == NULL)
                        goto failed;

                r->data = fcp_frame;
        }

        r->resource.release = release_request;
        ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
        if (ret < 0)
                goto failed;

        if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
                struct fw_cdev_event_request *req = &e->req.request;

                if (tcode & 0x10)
                        tcode = TCODE_LOCK_REQUEST;

                req->type       = FW_CDEV_EVENT_REQUEST;
                req->tcode      = tcode;
                req->offset     = offset;
                req->length     = length;
                req->handle     = r->resource.handle;
                req->closure    = handler->closure;
                event_size0     = sizeof(*req);
        } else {
                struct fw_cdev_event_request2 *req = &e->req.request2;

                req->type       = FW_CDEV_EVENT_REQUEST2;
                req->tcode      = tcode;
                req->offset     = offset;
                req->source_node_id      = source;
                req->destination_node_id = destination;
                req->card       = card->index;
                req->generation = generation;
                req->length     = length;
                req->handle     = r->resource.handle;
                req->closure    = handler->closure;
                event_size0     = sizeof(*req);
        }

        queue_event(handler->client, &e->event,
                    &e->req, event_size0, r->data, length);
        return;

 failed:
        kfree(r);
        kfree(e);
        kfree(fcp_frame);

        if (!is_fcp_request(request))
                fw_send_response(card, request, RCODE_CONFLICT_ERROR);

        fw_card_put(card);
}
static void release_address_handler(struct client *client,
                                    struct client_resource *resource)
{
        struct address_handler_resource *r =
            container_of(resource, struct address_handler_resource, resource);

        fw_core_remove_address_handler(&r->handler);
        kfree(r);
}
static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_allocate *a = &arg->allocate;
        struct address_handler_resource *r;
        struct fw_address_region region;
        int ret;

        r = kmalloc(sizeof(*r), GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        region.start = a->offset;
        if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
                region.end = a->offset + a->length;
        else
                region.end = a->region_end;

        r->handler.length           = a->length;
        r->handler.address_callback = handle_request;
        r->handler.callback_data    = r;
        r->closure = a->closure;
        r->client  = client;

        ret = fw_core_add_address_handler(&r->handler, &region);
        if (ret < 0) {
                kfree(r);
                return ret;
        }
        a->offset = r->handler.offset;

        r->resource.release = release_address_handler;
        ret = add_client_resource(client, &r->resource, GFP_KERNEL);
        if (ret < 0) {
                release_address_handler(client, &r->resource);
                return ret;
        }
        a->handle = r->resource.handle;

        return 0;
}
static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
        return release_client_resource(client, arg->deallocate.handle,
                                       release_address_handler, NULL);
}
static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_response *a = &arg->send_response;
        struct client_resource *resource;
        struct inbound_transaction_resource *r;
        int ret = 0;

        if (release_client_resource(client, a->handle,
                                    release_request, &resource) < 0)
                return -EINVAL;

        r = container_of(resource, struct inbound_transaction_resource,
                         resource);
        if (is_fcp_request(r->request))
                goto out;

        if (a->length != fw_get_response_length(r->request)) {
                ret = -EINVAL;
                goto out;
        }
        if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
                ret = -EFAULT;
                goto out;
        }
        fw_send_response(r->card, r->request, a->rcode);
 out:
        fw_card_put(r->card);
        kfree(r);

        return ret;
}
*client
, union ioctl_arg
*arg
)
851 fw_schedule_bus_reset(client
->device
->card
, true,
852 arg
->initiate_bus_reset
.type
== FW_CDEV_SHORT_RESET
);
static void release_descriptor(struct client *client,
                               struct client_resource *resource)
{
        struct descriptor_resource *r =
                container_of(resource, struct descriptor_resource, resource);

        fw_core_remove_descriptor(&r->descriptor);
        kfree(r);
}
static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
        struct descriptor_resource *r;
        int ret;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        if (a->length > 256)
                return -EINVAL;

        r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
                ret = -EFAULT;
                goto failed;
        }

        r->descriptor.length    = a->length;
        r->descriptor.immediate = a->immediate;
        r->descriptor.key       = a->key;
        r->descriptor.data      = r->data;

        ret = fw_core_add_descriptor(&r->descriptor);
        if (ret < 0)
                goto failed;

        r->resource.release = release_descriptor;
        ret = add_client_resource(client, &r->resource, GFP_KERNEL);
        if (ret < 0) {
                fw_core_remove_descriptor(&r->descriptor);
                goto failed;
        }
        a->handle = r->resource.handle;

        return 0;
 failed:
        kfree(r);

        return ret;
}
static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
        return release_client_resource(client, arg->remove_descriptor.handle,
                                       release_descriptor, NULL);
}
static void iso_callback(struct fw_iso_context *context, u32 cycle,
                         size_t header_length, void *header, void *data)
{
        struct client *client = data;
        struct iso_interrupt_event *e;

        e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
        if (e == NULL) {
                fw_notify("Out of memory when allocating event\n");
                return;
        }
        e->interrupt.type          = FW_CDEV_EVENT_ISO_INTERRUPT;
        e->interrupt.closure       = client->iso_closure;
        e->interrupt.cycle         = cycle;
        e->interrupt.header_length = header_length;
        memcpy(e->interrupt.header, header, header_length);
        queue_event(client, &e->event, &e->interrupt,
                    sizeof(e->interrupt) + header_length, NULL, 0);
}
static void iso_mc_callback(struct fw_iso_context *context,
                            dma_addr_t completed, void *data)
{
        struct client *client = data;
        struct iso_interrupt_mc_event *e;

        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (e == NULL) {
                fw_notify("Out of memory when allocating event\n");
                return;
        }
        e->interrupt.type      = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
        e->interrupt.closure   = client->iso_closure;
        e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
                                                      completed);
        queue_event(client, &e->event, &e->interrupt,
                    sizeof(e->interrupt), NULL, 0);
}
static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
        struct fw_iso_context *context;
        fw_iso_callback_t cb;

        BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
                     FW_CDEV_ISO_CONTEXT_RECEIVE  != FW_ISO_CONTEXT_RECEIVE  ||
                     FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
                                        FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);

        switch (a->type) {
        case FW_ISO_CONTEXT_TRANSMIT:
                if (a->speed > SCODE_3200 || a->channel > 63)
                        return -EINVAL;

                cb = iso_callback;
                break;

        case FW_ISO_CONTEXT_RECEIVE:
                if (a->header_size < 4 || (a->header_size & 3) ||
                    a->channel > 63)
                        return -EINVAL;

                cb = iso_callback;
                break;

        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                cb = (fw_iso_callback_t)iso_mc_callback;
                break;

        default:
                return -EINVAL;
        }

        context = fw_iso_context_create(client->device->card, a->type,
                        a->channel, a->speed, a->header_size, cb, client);
        if (IS_ERR(context))
                return PTR_ERR(context);

        /* We only support one context at this time. */
        spin_lock_irq(&client->lock);
        if (client->iso_context != NULL) {
                spin_unlock_irq(&client->lock);
                fw_iso_context_destroy(context);
                return -EBUSY;
        }
        client->iso_closure = a->closure;
        client->iso_context = context;
        spin_unlock_irq(&client->lock);

        a->handle = 0;

        return 0;
}
static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
        struct fw_iso_context *ctx = client->iso_context;

        if (ctx == NULL || a->handle != 0)
                return -EINVAL;

        return fw_iso_context_set_channels(ctx, &a->channels);
}
/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
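
/*
 * Layout of fw_cdev_iso_packet.control as decoded by the macros above:
 *
 *	bits  0..15: payload_length
 *	bit      16: interrupt
 *	bit      17: skip
 *	bits 18..19: tag
 *	bits 20..23: sy
 *	bits 24..31: header_length
 */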
static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_queue_iso *a = &arg->queue_iso;
        struct fw_cdev_iso_packet __user *p, *end, *next;
        struct fw_iso_context *ctx = client->iso_context;
        unsigned long payload, buffer_end, transmit_header_bytes = 0;
        u32 control;
        int count;
        struct {
                struct fw_iso_packet packet;
                u8 header[256];
        } u;

        if (ctx == NULL || a->handle != 0)
                return -EINVAL;

        /*
         * If the user passes a non-NULL data pointer, has mmap()'ed
         * the iso buffer, and the pointer points inside the buffer,
         * we setup the payload pointers accordingly.  Otherwise we
         * set them both to 0, which will still let packets with
         * payload_length == 0 through.  In other words, if no packets
         * use the indirect payload, the iso buffer need not be mapped
         * and the a->data pointer is ignored.
         */
        payload = (unsigned long)a->data - client->vm_start;
        buffer_end = client->buffer.page_count << PAGE_SHIFT;
        if (a->data == 0 || client->buffer.pages == NULL ||
            payload >= buffer_end) {
                payload = 0;
                buffer_end = 0;
        }

        if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
                return -EINVAL;

        p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
        if (!access_ok(VERIFY_READ, p, a->size))
                return -EFAULT;

        end = (void __user *)p + a->size;
        count = 0;
        while (p < end) {
                if (get_user(control, &p->control))
                        return -EFAULT;
                u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
                u.packet.interrupt = GET_INTERRUPT(control);
                u.packet.skip = GET_SKIP(control);
                u.packet.tag = GET_TAG(control);
                u.packet.sy = GET_SY(control);
                u.packet.header_length = GET_HEADER_LENGTH(control);

                switch (ctx->type) {
                case FW_ISO_CONTEXT_TRANSMIT:
                        if (u.packet.header_length & 3)
                                return -EINVAL;
                        transmit_header_bytes = u.packet.header_length;
                        break;

                case FW_ISO_CONTEXT_RECEIVE:
                        if (u.packet.header_length == 0 ||
                            u.packet.header_length % ctx->header_size != 0)
                                return -EINVAL;
                        break;

                case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                        if (u.packet.payload_length == 0 ||
                            u.packet.payload_length & 3)
                                return -EINVAL;
                        break;
                }

                next = (struct fw_cdev_iso_packet __user *)
                        &p->header[transmit_header_bytes / 4];
                if (next > end)
                        return -EINVAL;
                if (__copy_from_user
                    (u.packet.header, p->header, transmit_header_bytes))
                        return -EFAULT;
                if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
                    u.packet.header_length + u.packet.payload_length > 0)
                        return -EINVAL;
                if (payload + u.packet.payload_length > buffer_end)
                        return -EINVAL;

                if (fw_iso_context_queue(ctx, &u.packet,
                                         &client->buffer, payload))
                        break;

                p = next;
                payload += u.packet.payload_length;
                count++;
        }

        a->size    -= uptr_to_u64(p) - a->packets;
        a->packets  = uptr_to_u64(p);
        a->data     = client->vm_start + payload;

        return count;
}
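
/*
 * ioctl_queue_iso() returns the number of packets it queued and rewrites
 * a->size, a->packets, and a->data to describe the unconsumed remainder,
 * so user space can resubmit the same structure later if the DMA program
 * was full.
 */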
static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_start_iso *a = &arg->start_iso;

        BUILD_BUG_ON(
            FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
            FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
            FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);

        if (client->iso_context == NULL || a->handle != 0)
                return -EINVAL;

        if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
            (a->tags == 0 || a->tags > 15 || a->sync > 15))
                return -EINVAL;

        return fw_iso_context_start(client->iso_context,
                                    a->cycle, a->sync, a->tags);
}
static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_stop_iso *a = &arg->stop_iso;

        if (client->iso_context == NULL || a->handle != 0)
                return -EINVAL;

        return fw_iso_context_stop(client->iso_context);
}
static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
        struct fw_card *card = client->device->card;
        struct timespec ts = {0, 0};
        u32 cycle_time;
        int ret = 0;

        local_irq_disable();

        cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);

        switch (a->clk_id) {
        case CLOCK_REALTIME:      getnstimeofday(&ts);                   break;
        case CLOCK_MONOTONIC:     do_posix_clock_monotonic_gettime(&ts); break;
        case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts);                  break;
        default:
                ret = -EINVAL;
        }

        local_irq_enable();

        a->tv_sec      = ts.tv_sec;
        a->tv_nsec     = ts.tv_nsec;
        a->cycle_timer = cycle_time;

        return ret;
}
static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
        struct fw_cdev_get_cycle_timer2 ct2;

        ct2.clk_id = CLOCK_REALTIME;
        ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);

        a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
        a->cycle_timer = ct2.cycle_timer;

        return 0;
}
static void iso_resource_work(struct work_struct *work)
{
        struct iso_resource_event *e;
        struct iso_resource *r =
                        container_of(work, struct iso_resource, work.work);
        struct client *client = r->client;
        int generation, channel, bandwidth, todo;
        bool skip, free, success;

        spin_lock_irq(&client->lock);
        generation = client->device->generation;
        todo = r->todo;
        /* Allow 1000ms grace period for other reallocations. */
        if (todo == ISO_RES_ALLOC &&
            time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
                schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
                skip = true;
        } else {
                /* We could be called twice within the same generation. */
                skip = todo == ISO_RES_REALLOC &&
                       r->generation == generation;
        }
        free = todo == ISO_RES_DEALLOC ||
               todo == ISO_RES_ALLOC_ONCE ||
               todo == ISO_RES_DEALLOC_ONCE;
        r->generation = generation;
        spin_unlock_irq(&client->lock);

        if (skip)
                goto out;

        bandwidth = r->bandwidth;

        fw_iso_resource_manage(client->device->card, generation,
                        r->channels, &channel, &bandwidth,
                        todo == ISO_RES_ALLOC ||
                        todo == ISO_RES_REALLOC ||
                        todo == ISO_RES_ALLOC_ONCE,
                        r->transaction_data);
        /*
         * Is this generation outdated already?  As long as this resource sticks
         * in the idr, it will be scheduled again for a newer generation or at
         * shutdown.
         */
        if (channel == -EAGAIN &&
            (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
                goto out;

        success = channel >= 0 || bandwidth > 0;

        spin_lock_irq(&client->lock);
        /*
         * Transit from allocation to reallocation, except if the client
         * requested deallocation in the meantime.
         */
        if (r->todo == ISO_RES_ALLOC)
                r->todo = ISO_RES_REALLOC;
        /*
         * Allocation or reallocation failure?  Pull this resource out of the
         * idr and prepare for deletion, unless the client is shutting down.
         */
        if (r->todo == ISO_RES_REALLOC && !success &&
            !client->in_shutdown &&
            idr_find(&client->resource_idr, r->resource.handle)) {
                idr_remove(&client->resource_idr, r->resource.handle);
                client_put(client);
                free = true;
        }
        spin_unlock_irq(&client->lock);

        if (todo == ISO_RES_ALLOC && channel >= 0)
                r->channels = 1ULL << channel;

        if (todo == ISO_RES_REALLOC && success)
                goto out;

        if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
                e = r->e_alloc;
                r->e_alloc = NULL;
        } else {
                e = r->e_dealloc;
                r->e_dealloc = NULL;
        }
        e->iso_resource.handle    = r->resource.handle;
        e->iso_resource.channel   = channel;
        e->iso_resource.bandwidth = bandwidth;

        queue_event(client, &e->event,
                    &e->iso_resource, sizeof(e->iso_resource), NULL, 0);

        if (free) {
                cancel_delayed_work(&r->work);
                kfree(r->e_alloc);
                kfree(r->e_dealloc);
                kfree(r);
        }
 out:
        client_put(client);
}
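
/*
 * Summary of the r->todo state machine driven by iso_resource_work():
 * ISO_RES_ALLOC becomes ISO_RES_REALLOC after the first successful
 * allocation, and the resource is then reallocated after every bus
 * reset until the client requests ISO_RES_DEALLOC or closes the file.
 * The *_ONCE variants allocate or deallocate a single time and free
 * the iso_resource immediately afterwards.
 */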
static void release_iso_resource(struct client *client,
                                 struct client_resource *resource)
{
        struct iso_resource *r =
                container_of(resource, struct iso_resource, resource);

        spin_lock_irq(&client->lock);
        r->todo = ISO_RES_DEALLOC;
        schedule_iso_resource(r, 0);
        spin_unlock_irq(&client->lock);
}
static int init_iso_resource(struct client *client,
                struct fw_cdev_allocate_iso_resource *request, int todo)
{
        struct iso_resource_event *e1, *e2;
        struct iso_resource *r;
        int ret;

        if ((request->channels == 0 && request->bandwidth == 0) ||
            request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
            request->bandwidth < 0)
                return -EINVAL;

        r  = kmalloc(sizeof(*r), GFP_KERNEL);
        e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
        e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
        if (r == NULL || e1 == NULL || e2 == NULL) {
                ret = -ENOMEM;
                goto fail;
        }

        INIT_DELAYED_WORK(&r->work, iso_resource_work);
        r->client     = client;
        r->todo       = todo;
        r->generation = -1;
        r->channels   = request->channels;
        r->bandwidth  = request->bandwidth;
        r->e_alloc    = e1;
        r->e_dealloc  = e2;

        e1->iso_resource.closure = request->closure;
        e1->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
        e2->iso_resource.closure = request->closure;
        e2->iso_resource.type    = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

        if (todo == ISO_RES_ALLOC) {
                r->resource.release = release_iso_resource;
                ret = add_client_resource(client, &r->resource, GFP_KERNEL);
                if (ret < 0)
                        goto fail;
        } else {
                r->resource.release = NULL;
                r->resource.handle = -1;
                schedule_iso_resource(r, 0);
        }
        request->handle = r->resource.handle;

        return 0;
 fail:
        kfree(r);
        kfree(e1);
        kfree(e2);

        return ret;
}
static int ioctl_allocate_iso_resource(struct client *client,
                                       union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_ALLOC);
}
static int ioctl_deallocate_iso_resource(struct client *client,
                                         union ioctl_arg *arg)
{
        return release_client_resource(client,
                        arg->deallocate.handle, release_iso_resource, NULL);
}
static int ioctl_allocate_iso_resource_once(struct client *client,
                                            union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}
static int ioctl_deallocate_iso_resource_once(struct client *client,
                                              union ioctl_arg *arg)
{
        return init_iso_resource(client,
                        &arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}
/*
 * Returns a speed code:  Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
        return client->device->max_speed;
}
static int ioctl_send_broadcast_request(struct client *client,
                                        union ioctl_arg *arg)
{
        struct fw_cdev_send_request *a = &arg->send_request;

        switch (a->tcode) {
        case TCODE_WRITE_QUADLET_REQUEST:
        case TCODE_WRITE_BLOCK_REQUEST:
                break;
        default:
                return -EINVAL;
        }

        /* Security policy: Only allow accesses to Units Space. */
        if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
                return -EACCES;

        return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}
static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
        struct fw_cdev_send_request request;
        int dest;

        if (a->speed > client->device->card->link_speed ||
            a->length > 1024 << a->speed)
                return -EIO;

        if (a->tag > 3 || a->channel > 63 || a->sy > 15)
                return -EINVAL;

        dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
        request.tcode      = TCODE_STREAM_DATA;
        request.length     = a->length;
        request.closure    = a->closure;
        request.data       = a->data;
        request.generation = a->generation;

        return init_request(client, &request, dest, a->speed);
}
static void outbound_phy_packet_callback(struct fw_packet *packet,
                                         struct fw_card *card, int status)
{
        struct outbound_phy_packet_event *e =
                container_of(packet, struct outbound_phy_packet_event, p);

        switch (status) {
        /* expected: */
        case ACK_COMPLETE:   e->phy_packet.rcode = RCODE_COMPLETE;   break;
        /* should never happen with PHY packets: */
        case ACK_PENDING:    e->phy_packet.rcode = RCODE_COMPLETE;   break;
        case ACK_BUSY_X:
        case ACK_BUSY_A:
        case ACK_BUSY_B:     e->phy_packet.rcode = RCODE_BUSY;       break;
        case ACK_DATA_ERROR: e->phy_packet.rcode = RCODE_DATA_ERROR; break;
        case ACK_TYPE_ERROR: e->phy_packet.rcode = RCODE_TYPE_ERROR; break;
        /* stale generation; cancelled; on certain controllers: no ack */
        default:             e->phy_packet.rcode = status;           break;
        }
        e->phy_packet.data[0] = packet->timestamp;

        queue_event(e->client, &e->event, &e->phy_packet,
                    sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
        client_put(e->client);
}
static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
        struct fw_card *card = client->device->card;
        struct outbound_phy_packet_event *e;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        e = kzalloc(sizeof(*e) + 4, GFP_KERNEL);
        if (e == NULL)
                return -ENOMEM;

        client_get(client);
        e->client             = client;
        e->p.speed            = SCODE_100;
        e->p.generation       = a->generation;
        e->p.header[0]        = a->data[0];
        e->p.header[1]        = a->data[1];
        e->p.header_length    = 8;
        e->p.callback         = outbound_phy_packet_callback;
        e->phy_packet.closure = a->closure;
        e->phy_packet.type    = FW_CDEV_EVENT_PHY_PACKET_SENT;
        if (is_ping_packet(a->data))
                        e->phy_packet.length = 4;

        card->driver->send_request(card, &e->p);

        return 0;
}
static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
        struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
        struct fw_card *card = client->device->card;

        /* Access policy: Allow this ioctl only on local nodes' device files. */
        if (!client->device->is_local)
                return -ENOSYS;

        spin_lock_irq(&card->lock);

        list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
        client->phy_receiver_closure = a->closure;

        spin_unlock_irq(&card->lock);

        return 0;
}
void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
        struct client *client;
        struct inbound_phy_packet_event *e;
        unsigned long flags;

        spin_lock_irqsave(&card->lock, flags);

        list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
                e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
                if (e == NULL) {
                        fw_notify("Out of memory when allocating event\n");
                        break;
                }
                e->phy_packet.closure = client->phy_receiver_closure;
                e->phy_packet.type    = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
                e->phy_packet.rcode   = RCODE_COMPLETE;
                e->phy_packet.length  = 8;
                e->phy_packet.data[0] = p->header[1];
                e->phy_packet.data[1] = p->header[2];
                queue_event(client, &e->event,
                            &e->phy_packet, sizeof(e->phy_packet) + 8, NULL, 0);
        }

        spin_unlock_irqrestore(&card->lock, flags);
}
static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
        [0x00] = ioctl_get_info,
        [0x01] = ioctl_send_request,
        [0x02] = ioctl_allocate,
        [0x03] = ioctl_deallocate,
        [0x04] = ioctl_send_response,
        [0x05] = ioctl_initiate_bus_reset,
        [0x06] = ioctl_add_descriptor,
        [0x07] = ioctl_remove_descriptor,
        [0x08] = ioctl_create_iso_context,
        [0x09] = ioctl_queue_iso,
        [0x0a] = ioctl_start_iso,
        [0x0b] = ioctl_stop_iso,
        [0x0c] = ioctl_get_cycle_timer,
        [0x0d] = ioctl_allocate_iso_resource,
        [0x0e] = ioctl_deallocate_iso_resource,
        [0x0f] = ioctl_allocate_iso_resource_once,
        [0x10] = ioctl_deallocate_iso_resource_once,
        [0x11] = ioctl_get_speed,
        [0x12] = ioctl_send_broadcast_request,
        [0x13] = ioctl_send_stream_packet,
        [0x14] = ioctl_get_cycle_timer2,
        [0x15] = ioctl_send_phy_packet,
        [0x16] = ioctl_receive_phy_packets,
        [0x17] = ioctl_set_iso_channels,
};
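
/*
 * The index of each handler above must equal _IOC_NR() of the
 * corresponding FW_CDEV_IOC_* command number defined in
 * linux/firewire-cdev.h; dispatch_ioctl() below relies on this mapping
 * to route and size-check the argument buffer.
 */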
static int dispatch_ioctl(struct client *client,
                          unsigned int cmd, void __user *arg)
{
        union ioctl_arg buffer;
        int ret;

        if (fw_device_is_shutdown(client->device))
                return -ENODEV;

        if (_IOC_TYPE(cmd) != '#' ||
            _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
            _IOC_SIZE(cmd) > sizeof(buffer))
                return -EINVAL;

        if (_IOC_DIR(cmd) == _IOC_READ)
                memset(&buffer, 0, _IOC_SIZE(cmd));

        if (_IOC_DIR(cmd) & _IOC_WRITE)
                if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
                        return -EFAULT;

        ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
        if (ret < 0)
                return ret;

        if (_IOC_DIR(cmd) & _IOC_READ)
                if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
                        return -EFAULT;

        return ret;
}
static long fw_device_op_ioctl(struct file *file,
                               unsigned int cmd, unsigned long arg)
{
        return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}
#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
                                      unsigned int cmd, unsigned long arg)
{
        return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct client *client = file->private_data;
        enum dma_data_direction direction;
        unsigned long size;
        int page_count, ret;

        if (fw_device_is_shutdown(client->device))
                return -ENODEV;

        if (client->buffer.pages != NULL)
                return -EBUSY;

        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        if (vma->vm_start & ~PAGE_MASK)
                return -EINVAL;

        client->vm_start = vma->vm_start;
        size = vma->vm_end - vma->vm_start;
        page_count = size >> PAGE_SHIFT;
        if (size & ~PAGE_MASK)
                return -EINVAL;

        if (vma->vm_flags & VM_WRITE)
                direction = DMA_TO_DEVICE;
        else
                direction = DMA_FROM_DEVICE;

        ret = fw_iso_buffer_init(&client->buffer, client->device->card,
                                 page_count, direction);
        if (ret < 0)
                return ret;

        ret = fw_iso_buffer_map(&client->buffer, vma);
        if (ret < 0)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        return ret;
}
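
/*
 * The mapping set up here backs the indirect payloads of
 * ioctl_queue_iso(): writable mappings are created DMA_TO_DEVICE for
 * transmit contexts, read-only mappings DMA_FROM_DEVICE for receive.
 * Only one buffer per open file is supported, hence the -EBUSY check
 * above.
 */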
static int shutdown_resource(int id, void *p, void *data)
{
        struct client_resource *resource = p;
        struct client *client = data;

        resource->release(client, resource);
        client_put(client);

        return 0;
}
static int fw_device_op_release(struct inode *inode, struct file *file)
{
        struct client *client = file->private_data;
        struct event *event, *next_event;

        spin_lock_irq(&client->device->card->lock);
        list_del(&client->phy_receiver_link);
        spin_unlock_irq(&client->device->card->lock);

        mutex_lock(&client->device->client_list_mutex);
        list_del(&client->link);
        mutex_unlock(&client->device->client_list_mutex);

        if (client->iso_context)
                fw_iso_context_destroy(client->iso_context);

        if (client->buffer.pages)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        /* Freeze client->resource_idr and client->event_list */
        spin_lock_irq(&client->lock);
        client->in_shutdown = true;
        spin_unlock_irq(&client->lock);

        idr_for_each(&client->resource_idr, shutdown_resource, client);
        idr_remove_all(&client->resource_idr);
        idr_destroy(&client->resource_idr);

        list_for_each_entry_safe(event, next_event, &client->event_list, link)
                kfree(event);

        client_put(client);

        return 0;
}
static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
        struct client *client = file->private_data;
        unsigned int mask = 0;

        poll_wait(file, &client->wait, pt);

        if (fw_device_is_shutdown(client->device))
                mask |= POLLHUP | POLLERR;
        if (!list_empty(&client->event_list))
                mask |= POLLIN | POLLRDNORM;

        return mask;
}
const struct file_operations fw_device_ops = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .open           = fw_device_op_open,
        .read           = fw_device_op_read,
        .unlocked_ioctl = fw_device_op_ioctl,
        .mmap           = fw_device_op_mmap,
        .release        = fw_device_op_release,
        .poll           = fw_device_op_poll,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = fw_device_op_compat_ioctl,
#endif
};