/*
 * IEEE 1394 (FireWire) subsystem - raw interface to the bus.
 *
 * Copyright (C) 1999, 2000 Andreas E. Bombe
 *
 * This code is licensed under the GPL. See the file COPYING in the root
 * directory of the kernel sources for details.
 */
12 #include <linux/kernel.h>
13 #include <linux/list.h>
14 #include <linux/string.h>
15 #include <linux/slab.h>
17 #include <linux/poll.h>
18 #include <linux/module.h>
19 #include <linux/version.h>
20 #include <linux/smp_lock.h>
21 #include <asm/uaccess.h>
23 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,0)
24 #include <linux/devfs_fs_kernel.h>
28 #include "ieee1394_types.h"
29 #include "ieee1394_core.h"
31 #include "highlevel.h"
32 #include "ieee1394_transactions.h"
/*
 * Convert between the u64 address/buffer fields of struct raw1394_request
 * and kernel pointers.  On 32 bit platforms the value is squeezed through
 * u32 first so that widening to u64 does not sign-extend the pointer.
 */
#if BITS_PER_LONG == 64
#define int2ptr(x) ((void *)x)
#define ptr2int(x) ((u64)x)
#else
#define int2ptr(x) ((void *)(u32)x)
#define ptr2int(x) ((u64)(u32)x)
#endif
45 static devfs_handle_t devfs_handle
;
47 LIST_HEAD(host_info_list
);
48 static int host_count
;
49 spinlock_t host_info_lock
= SPIN_LOCK_UNLOCKED
;
51 static struct hpsb_highlevel
*hl_handle
;
53 static atomic_t iso_buffer_size
;
54 static const int iso_buffer_max
= 4 * 1024 * 1024; /* 4 MB */
56 static void queue_complete_cb(struct pending_request
*req
);
58 static struct pending_request
*__alloc_pending_request(int flags
)
60 struct pending_request
*req
;
62 req
= (struct pending_request
*)kmalloc(sizeof(struct pending_request
),
65 memset(req
, 0, sizeof(struct pending_request
));
66 INIT_LIST_HEAD(&req
->list
);
67 req
->tq
.routine
= (void(*)(void*))queue_complete_cb
;
73 inline static struct pending_request
*alloc_pending_request(void)
75 return __alloc_pending_request(SLAB_KERNEL
);
78 static void free_pending_request(struct pending_request
*req
)
81 if (atomic_dec_and_test(&req
->ibs
->refcount
)) {
82 atomic_sub(req
->ibs
->data_size
, &iso_buffer_size
);
85 } else if (req
->free_data
) {
88 free_hpsb_packet(req
->packet
);
92 static void queue_complete_req(struct pending_request
*req
)
95 struct file_info
*fi
= req
->file_info
;
97 spin_lock_irqsave(&fi
->reqlists_lock
, flags
);
99 list_add_tail(&req
->list
, &fi
->req_complete
);
100 spin_unlock_irqrestore(&fi
->reqlists_lock
, flags
);
102 up(&fi
->complete_sem
);
103 wake_up_interruptible(&fi
->poll_wait_complete
);
106 static void queue_complete_cb(struct pending_request
*req
)
108 struct hpsb_packet
*packet
= req
->packet
;
109 int rcode
= (packet
->header
[1] >> 12) & 0xf;
111 switch (packet
->ack_code
) {
113 case ACKX_SEND_ERROR
:
114 req
->req
.error
= RAW1394_ERROR_SEND_ERROR
;
117 req
->req
.error
= RAW1394_ERROR_ABORTED
;
120 req
->req
.error
= RAW1394_ERROR_TIMEOUT
;
123 req
->req
.error
= (packet
->ack_code
<< 16) | rcode
;
127 if (!((packet
->ack_code
== ACK_PENDING
) && (rcode
== RCODE_COMPLETE
))) {
131 free_tlabel(packet
->host
, packet
->node_id
, packet
->tlabel
);
133 queue_complete_req(req
);
137 static void add_host(struct hpsb_host
*host
)
139 struct host_info
*hi
;
141 hi
= (struct host_info
*)kmalloc(sizeof(struct host_info
), SLAB_KERNEL
);
143 INIT_LIST_HEAD(&hi
->list
);
145 INIT_LIST_HEAD(&hi
->file_info_list
);
147 spin_lock_irq(&host_info_lock
);
148 list_add_tail(&hi
->list
, &host_info_list
);
150 spin_unlock_irq(&host_info_lock
);
155 static struct host_info
*find_host_info(struct hpsb_host
*host
)
157 struct list_head
*lh
;
158 struct host_info
*hi
;
160 lh
= host_info_list
.next
;
161 while (lh
!= &host_info_list
) {
162 hi
= list_entry(lh
, struct host_info
, list
);
163 if (hi
->host
== host
) {
172 static void remove_host(struct hpsb_host
*host
)
174 struct host_info
*hi
;
176 spin_lock_irq(&host_info_lock
);
177 hi
= find_host_info(host
);
183 spin_unlock_irq(&host_info_lock
);
186 printk(KERN_ERR
"raw1394: attempt to remove unknown host "
194 static void host_reset(struct hpsb_host
*host
)
197 struct list_head
*lh
;
198 struct host_info
*hi
;
199 struct file_info
*fi
;
200 struct pending_request
*req
;
202 spin_lock_irqsave(&host_info_lock
, flags
);
203 hi
= find_host_info(host
);
206 lh
= hi
->file_info_list
.next
;
208 while (lh
!= &hi
->file_info_list
) {
209 fi
= list_entry(lh
, struct file_info
, list
);
210 req
= __alloc_pending_request(SLAB_ATOMIC
);
214 req
->req
.type
= RAW1394_REQ_BUS_RESET
;
215 req
->req
.generation
= get_hpsb_generation();
216 req
->req
.misc
= (host
->node_id
<< 16)
218 if (fi
->protocol_version
> 3) {
219 req
->req
.misc
|= ((host
->irm_id
223 queue_complete_req(req
);
229 spin_unlock_irqrestore(&host_info_lock
, flags
);
232 static void iso_receive(struct hpsb_host
*host
, int channel
, quadlet_t
*data
,
236 struct list_head
*lh
;
237 struct host_info
*hi
;
238 struct file_info
*fi
;
239 struct pending_request
*req
;
240 struct iso_block_store
*ibs
= NULL
;
243 if ((atomic_read(&iso_buffer_size
) + length
) > iso_buffer_max
) {
244 HPSB_INFO("dropped iso packet");
248 spin_lock_irqsave(&host_info_lock
, flags
);
249 hi
= find_host_info(host
);
252 for (lh
= hi
->file_info_list
.next
; lh
!= &hi
->file_info_list
;
254 fi
= list_entry(lh
, struct file_info
, list
);
256 if (!(fi
->listen_channels
& (1ULL << channel
))) {
260 req
= __alloc_pending_request(SLAB_ATOMIC
);
264 ibs
= kmalloc(sizeof(struct iso_block_store
)
265 + length
, SLAB_ATOMIC
);
271 atomic_add(length
, &iso_buffer_size
);
272 atomic_set(&ibs
->refcount
, 0);
273 ibs
->data_size
= length
;
274 memcpy(ibs
->data
, data
, length
);
277 atomic_inc(&ibs
->refcount
);
281 req
->data
= ibs
->data
;
282 req
->req
.type
= RAW1394_REQ_ISO_RECEIVE
;
283 req
->req
.generation
= get_hpsb_generation();
285 req
->req
.recvb
= ptr2int(fi
->iso_buffer
);
286 req
->req
.length
= MIN(length
, fi
->iso_buffer_length
);
288 list_add_tail(&req
->list
, &reqs
);
291 spin_unlock_irqrestore(&host_info_lock
, flags
);
294 while (lh
!= &reqs
) {
295 req
= list_entry(lh
, struct pending_request
, list
);
297 queue_complete_req(req
);
301 static void fcp_request(struct hpsb_host
*host
, int nodeid
, int direction
,
302 int cts
, u8
*data
, unsigned int length
)
305 struct list_head
*lh
;
306 struct host_info
*hi
;
307 struct file_info
*fi
;
308 struct pending_request
*req
;
309 struct iso_block_store
*ibs
= NULL
;
312 if ((atomic_read(&iso_buffer_size
) + length
) > iso_buffer_max
) {
313 HPSB_INFO("dropped fcp request");
317 spin_lock_irqsave(&host_info_lock
, flags
);
318 hi
= find_host_info(host
);
321 for (lh
= hi
->file_info_list
.next
; lh
!= &hi
->file_info_list
;
323 fi
= list_entry(lh
, struct file_info
, list
);
325 if (!fi
->fcp_buffer
) {
329 req
= __alloc_pending_request(SLAB_ATOMIC
);
333 ibs
= kmalloc(sizeof(struct iso_block_store
)
334 + length
, SLAB_ATOMIC
);
340 atomic_add(length
, &iso_buffer_size
);
341 atomic_set(&ibs
->refcount
, 0);
342 ibs
->data_size
= length
;
343 memcpy(ibs
->data
, data
, length
);
346 atomic_inc(&ibs
->refcount
);
350 req
->data
= ibs
->data
;
351 req
->req
.type
= RAW1394_REQ_FCP_REQUEST
;
352 req
->req
.generation
= get_hpsb_generation();
353 req
->req
.misc
= nodeid
| (direction
<< 16);
354 req
->req
.recvb
= ptr2int(fi
->fcp_buffer
);
355 req
->req
.length
= length
;
357 list_add_tail(&req
->list
, &reqs
);
360 spin_unlock_irqrestore(&host_info_lock
, flags
);
363 while (lh
!= &reqs
) {
364 req
= list_entry(lh
, struct pending_request
, list
);
366 queue_complete_req(req
);
371 static ssize_t
dev_read(struct file
*file
, char *buffer
, size_t count
,
372 loff_t
*offset_is_ignored
)
374 struct file_info
*fi
= (struct file_info
*)file
->private_data
;
375 struct list_head
*lh
;
376 struct pending_request
*req
;
378 if (count
!= sizeof(struct raw1394_request
)) {
382 if (!access_ok(VERIFY_WRITE
, buffer
, count
)) {
386 if (file
->f_flags
& O_NONBLOCK
) {
387 if (down_trylock(&fi
->complete_sem
)) {
391 if (down_interruptible(&fi
->complete_sem
)) {
396 spin_lock_irq(&fi
->reqlists_lock
);
397 lh
= fi
->req_complete
.next
;
399 spin_unlock_irq(&fi
->reqlists_lock
);
401 req
= list_entry(lh
, struct pending_request
, list
);
403 if (req
->req
.length
) {
404 if (copy_to_user(int2ptr(req
->req
.recvb
), req
->data
,
406 req
->req
.error
= RAW1394_ERROR_MEMFAULT
;
409 __copy_to_user(buffer
, &req
->req
, sizeof(req
->req
));
411 free_pending_request(req
);
412 return sizeof(struct raw1394_request
);
416 static int state_opened(struct file_info
*fi
, struct pending_request
*req
)
418 if (req
->req
.type
== RAW1394_REQ_INITIALIZE
) {
419 switch (req
->req
.misc
) {
420 case RAW1394_KERNELAPI_VERSION
:
422 fi
->state
= initialized
;
423 fi
->protocol_version
= req
->req
.misc
;
424 req
->req
.error
= RAW1394_ERROR_NONE
;
425 req
->req
.generation
= get_hpsb_generation();
429 req
->req
.error
= RAW1394_ERROR_COMPAT
;
430 req
->req
.misc
= RAW1394_KERNELAPI_VERSION
;
433 req
->req
.error
= RAW1394_ERROR_STATE_ORDER
;
437 queue_complete_req(req
);
438 return sizeof(struct raw1394_request
);
441 static int state_initialized(struct file_info
*fi
, struct pending_request
*req
)
443 struct list_head
*lh
;
444 struct host_info
*hi
;
445 struct raw1394_khost_list
*khl
;
447 if (req
->req
.generation
!= get_hpsb_generation()) {
448 req
->req
.error
= RAW1394_ERROR_GENERATION
;
449 req
->req
.generation
= get_hpsb_generation();
451 queue_complete_req(req
);
452 return sizeof(struct raw1394_request
);
455 switch (req
->req
.type
) {
456 case RAW1394_REQ_LIST_CARDS
:
457 spin_lock_irq(&host_info_lock
);
458 khl
= kmalloc(sizeof(struct raw1394_khost_list
) * host_count
,
462 req
->req
.misc
= host_count
;
463 req
->data
= (quadlet_t
*)khl
;
465 lh
= host_info_list
.next
;
466 while (lh
!= &host_info_list
) {
467 hi
= list_entry(lh
, struct host_info
, list
);
469 khl
->nodes
= hi
->host
->node_count
;
470 strcpy(khl
->name
, hi
->host
->template->name
);
476 spin_unlock_irq(&host_info_lock
);
479 req
->req
.error
= RAW1394_ERROR_NONE
;
480 req
->req
.length
= MIN(req
->req
.length
,
481 sizeof(struct raw1394_khost_list
)
489 case RAW1394_REQ_SET_CARD
:
492 spin_lock_irq(&host_info_lock
);
493 if (req
->req
.misc
< host_count
) {
494 lh
= host_info_list
.next
;
495 while (req
->req
.misc
--) {
498 hi
= list_entry(lh
, struct host_info
, list
);
499 hpsb_inc_host_usage(hi
->host
);
500 list_add_tail(&fi
->list
, &hi
->file_info_list
);
502 fi
->state
= connected
;
504 spin_unlock_irq(&host_info_lock
);
507 req
->req
.error
= RAW1394_ERROR_NONE
;
508 req
->req
.misc
= (fi
->host
->node_id
<< 16)
509 | fi
->host
->node_count
;
510 if (fi
->protocol_version
> 3) {
512 (fi
->host
->irm_id
& NODE_MASK
) << 8;
515 req
->req
.error
= RAW1394_ERROR_INVALID_ARG
;
522 req
->req
.error
= RAW1394_ERROR_STATE_ORDER
;
527 queue_complete_req(req
);
528 return sizeof(struct raw1394_request
);
531 static void handle_iso_listen(struct file_info
*fi
, struct pending_request
*req
)
533 int channel
= req
->req
.misc
;
535 spin_lock(&host_info_lock
);
536 if ((channel
> 63) || (channel
< -64)) {
537 req
->req
.error
= RAW1394_ERROR_INVALID_ARG
;
538 } else if (channel
>= 0) {
539 /* allocate channel req.misc */
540 if (fi
->listen_channels
& (1ULL << channel
)) {
541 req
->req
.error
= RAW1394_ERROR_ALREADY
;
543 fi
->listen_channels
|= 1ULL << channel
;
544 hpsb_listen_channel(hl_handle
, fi
->host
, channel
);
545 fi
->iso_buffer
= int2ptr(req
->req
.recvb
);
546 fi
->iso_buffer_length
= req
->req
.length
;
549 /* deallocate channel (one's complement neg) req.misc */
552 if (fi
->listen_channels
& (1ULL << channel
)) {
553 hpsb_unlisten_channel(hl_handle
, fi
->host
, channel
);
554 fi
->listen_channels
&= ~(1ULL << channel
);
556 req
->req
.error
= RAW1394_ERROR_INVALID_ARG
;
561 queue_complete_req(req
);
562 spin_unlock(&host_info_lock
);
565 static void handle_fcp_listen(struct file_info
*fi
, struct pending_request
*req
)
568 if (fi
->fcp_buffer
) {
569 req
->req
.error
= RAW1394_ERROR_ALREADY
;
571 fi
->fcp_buffer
= (u8
*)int2ptr(req
->req
.recvb
);
574 if (!fi
->fcp_buffer
) {
575 req
->req
.error
= RAW1394_ERROR_ALREADY
;
577 fi
->fcp_buffer
= NULL
;
582 queue_complete_req(req
);
585 static int handle_local_request(struct file_info
*fi
,
586 struct pending_request
*req
, int node
)
588 u64 addr
= req
->req
.address
& 0xffffffffffffULL
;
590 req
->data
= kmalloc(req
->req
.length
, SLAB_KERNEL
);
591 if (!req
->data
) return -ENOMEM
;
594 switch (req
->req
.type
) {
595 case RAW1394_REQ_ASYNC_READ
:
596 req
->req
.error
= highlevel_read(fi
->host
, node
, req
->data
, addr
,
600 case RAW1394_REQ_ASYNC_WRITE
:
601 if (copy_from_user(req
->data
, int2ptr(req
->req
.sendb
),
603 req
->req
.error
= RAW1394_ERROR_MEMFAULT
;
607 req
->req
.error
= highlevel_write(fi
->host
, node
, req
->data
,
608 addr
, req
->req
.length
);
612 case RAW1394_REQ_LOCK
:
613 if ((req
->req
.misc
== EXTCODE_FETCH_ADD
)
614 || (req
->req
.misc
== EXTCODE_LITTLE_ADD
)) {
615 if (req
->req
.length
!= 4) {
616 req
->req
.error
= RAW1394_ERROR_INVALID_ARG
;
620 if (req
->req
.length
!= 8) {
621 req
->req
.error
= RAW1394_ERROR_INVALID_ARG
;
626 if (copy_from_user(req
->data
, int2ptr(req
->req
.sendb
),
628 req
->req
.error
= RAW1394_ERROR_MEMFAULT
;
632 if (req
->req
.length
== 8) {
633 req
->req
.error
= highlevel_lock(fi
->host
, node
,
640 req
->req
.error
= highlevel_lock(fi
->host
, node
,
647 case RAW1394_REQ_LOCK64
:
649 req
->req
.error
= RAW1394_ERROR_STATE_ORDER
;
652 if (req
->req
.error
) req
->req
.length
= 0;
653 req
->req
.error
|= 0x00100000;
654 queue_complete_req(req
);
655 return sizeof(struct raw1394_request
);
658 static int handle_remote_request(struct file_info
*fi
,
659 struct pending_request
*req
, int node
)
661 struct hpsb_packet
*packet
= NULL
;
662 u64 addr
= req
->req
.address
& 0xffffffffffffULL
;
664 switch (req
->req
.type
) {
665 case RAW1394_REQ_ASYNC_READ
:
666 if (req
->req
.length
== 4) {
667 packet
= hpsb_make_readqpacket(fi
->host
, node
, addr
);
668 if (!packet
) return -ENOMEM
;
670 req
->data
= &packet
->header
[3];
672 packet
= hpsb_make_readbpacket(fi
->host
, node
, addr
,
674 if (!packet
) return -ENOMEM
;
676 req
->data
= packet
->data
;
680 case RAW1394_REQ_ASYNC_WRITE
:
681 if (req
->req
.length
== 4) {
684 if (copy_from_user(&x
, int2ptr(req
->req
.sendb
), 4)) {
685 req
->req
.error
= RAW1394_ERROR_MEMFAULT
;
688 packet
= hpsb_make_writeqpacket(fi
->host
, node
, addr
,
690 if (!packet
) return -ENOMEM
;
692 packet
= hpsb_make_writebpacket(fi
->host
, node
, addr
,
694 if (!packet
) return -ENOMEM
;
696 if (copy_from_user(packet
->data
, int2ptr(req
->req
.sendb
),
698 req
->req
.error
= RAW1394_ERROR_MEMFAULT
;
704 case RAW1394_REQ_LOCK
:
705 if ((req
->req
.misc
== EXTCODE_FETCH_ADD
)
706 || (req
->req
.misc
== EXTCODE_LITTLE_ADD
)) {
707 if (req
->req
.length
!= 4) {
708 req
->req
.error
= RAW1394_ERROR_INVALID_ARG
;
712 if (req
->req
.length
!= 8) {
713 req
->req
.error
= RAW1394_ERROR_INVALID_ARG
;
718 packet
= hpsb_make_lockpacket(fi
->host
, node
, addr
,
720 if (!packet
) return -ENOMEM
;
722 if (copy_from_user(packet
->data
, int2ptr(req
->req
.sendb
),
724 req
->req
.error
= RAW1394_ERROR_MEMFAULT
;
728 req
->data
= packet
->data
;
732 case RAW1394_REQ_LOCK64
:
734 req
->req
.error
= RAW1394_ERROR_STATE_ORDER
;
737 req
->packet
= packet
;
739 if (req
->req
.error
) {
741 queue_complete_req(req
);
742 return sizeof(struct raw1394_request
);
746 queue_task(&req
->tq
, &packet
->complete_tq
);
748 spin_lock_irq(&fi
->reqlists_lock
);
749 list_add_tail(&req
->list
, &fi
->req_pending
);
750 spin_unlock_irq(&fi
->reqlists_lock
);
752 if (!hpsb_send_packet(packet
)) {
753 req
->req
.error
= RAW1394_ERROR_SEND_ERROR
;
755 free_tlabel(packet
->host
, packet
->node_id
, packet
->tlabel
);
756 queue_complete_req(req
);
758 return sizeof(struct raw1394_request
);
761 static int handle_iso_send(struct file_info
*fi
, struct pending_request
*req
,
764 struct hpsb_packet
*packet
;
766 packet
= alloc_hpsb_packet(req
->req
.length
);
767 if (!packet
) return -ENOMEM
;
768 req
->packet
= packet
;
770 fill_iso_packet(packet
, req
->req
.length
, channel
& 0x3f,
771 (req
->req
.misc
>> 16) & 0x3, req
->req
.misc
& 0xf);
773 packet
->speed_code
= req
->req
.address
& 0x3;
774 packet
->host
= fi
->host
;
776 if (copy_from_user(packet
->data
, int2ptr(req
->req
.sendb
),
778 req
->req
.error
= RAW1394_ERROR_MEMFAULT
;
780 queue_complete_req(req
);
781 return sizeof(struct raw1394_request
);
785 req
->tq
.routine
= (void (*)(void*))queue_complete_req
;
787 queue_task(&req
->tq
, &packet
->complete_tq
);
789 spin_lock_irq(&fi
->reqlists_lock
);
790 list_add_tail(&req
->list
, &fi
->req_pending
);
791 spin_unlock_irq(&fi
->reqlists_lock
);
793 if (!hpsb_send_packet(packet
)) {
794 req
->req
.error
= RAW1394_ERROR_SEND_ERROR
;
795 queue_complete_req(req
);
798 return sizeof(struct raw1394_request
);
801 static int state_connected(struct file_info
*fi
, struct pending_request
*req
)
803 int node
= req
->req
.address
>> 48;
805 req
->req
.error
= RAW1394_ERROR_NONE
;
807 if (req
->req
.type
== RAW1394_REQ_ISO_SEND
) {
808 return handle_iso_send(fi
, req
, node
);
811 if (req
->req
.generation
!= get_hpsb_generation()) {
812 req
->req
.error
= RAW1394_ERROR_GENERATION
;
813 req
->req
.generation
= get_hpsb_generation();
815 queue_complete_req(req
);
816 return sizeof(struct raw1394_request
);
819 switch (req
->req
.type
) {
820 case RAW1394_REQ_ISO_LISTEN
:
821 handle_iso_listen(fi
, req
);
822 return sizeof(struct raw1394_request
);
824 case RAW1394_REQ_FCP_LISTEN
:
825 handle_fcp_listen(fi
, req
);
826 return sizeof(struct raw1394_request
);
828 case RAW1394_REQ_RESET_BUS
:
829 hpsb_reset_bus(fi
->host
);
830 return sizeof(struct raw1394_request
);
833 if (req
->req
.length
== 0) {
834 req
->req
.error
= RAW1394_ERROR_INVALID_ARG
;
835 queue_complete_req(req
);
836 return sizeof(struct raw1394_request
);
839 if (fi
->host
->node_id
== node
) {
840 return handle_local_request(fi
, req
, node
);
843 return handle_remote_request(fi
, req
, node
);
847 static ssize_t
dev_write(struct file
*file
, const char *buffer
, size_t count
,
848 loff_t
*offset_is_ignored
)
850 struct file_info
*fi
= (struct file_info
*)file
->private_data
;
851 struct pending_request
*req
;
854 if (count
!= sizeof(struct raw1394_request
)) {
858 req
= alloc_pending_request();
864 if (copy_from_user(&req
->req
, buffer
, sizeof(struct raw1394_request
))) {
865 free_pending_request(req
);
871 retval
= state_opened(fi
, req
);
875 retval
= state_initialized(fi
, req
);
879 retval
= state_connected(fi
, req
);
884 free_pending_request(req
);
890 static unsigned int dev_poll(struct file
*file
, poll_table
*pt
)
892 struct file_info
*fi
= file
->private_data
;
893 unsigned int mask
= POLLOUT
| POLLWRNORM
;
895 poll_wait(file
, &fi
->poll_wait_complete
, pt
);
897 spin_lock_irq(&fi
->reqlists_lock
);
898 if (!list_empty(&fi
->req_complete
)) {
899 mask
|= POLLIN
| POLLRDNORM
;
901 spin_unlock_irq(&fi
->reqlists_lock
);
906 static int dev_open(struct inode
*inode
, struct file
*file
)
908 struct file_info
*fi
;
910 if (MINOR(inode
->i_rdev
)) {
914 V22_COMPAT_MOD_INC_USE_COUNT
;
916 fi
= kmalloc(sizeof(struct file_info
), SLAB_KERNEL
);
918 V22_COMPAT_MOD_DEC_USE_COUNT
;
922 memset(fi
, 0, sizeof(struct file_info
));
924 INIT_LIST_HEAD(&fi
->list
);
926 INIT_LIST_HEAD(&fi
->req_pending
);
927 INIT_LIST_HEAD(&fi
->req_complete
);
928 sema_init(&fi
->complete_sem
, 0);
929 spin_lock_init(&fi
->reqlists_lock
);
930 init_waitqueue_head(&fi
->poll_wait_complete
);
932 file
->private_data
= fi
;
937 static int dev_release(struct inode
*inode
, struct file
*file
)
939 struct file_info
*fi
= file
->private_data
;
940 struct list_head
*lh
;
941 struct pending_request
*req
;
945 for (i
= 0; i
< 64; i
++) {
946 if (fi
->listen_channels
& (1ULL << i
)) {
947 hpsb_unlisten_channel(hl_handle
, fi
->host
, i
);
951 spin_lock(&host_info_lock
);
952 fi
->listen_channels
= 0;
953 spin_unlock(&host_info_lock
);
956 spin_lock_irq(&fi
->reqlists_lock
);
958 while (!list_empty(&fi
->req_complete
)) {
959 lh
= fi
->req_complete
.next
;
962 req
= list_entry(lh
, struct pending_request
, list
);
964 free_pending_request(req
);
967 if (list_empty(&fi
->req_pending
)) {
971 spin_unlock_irq(&fi
->reqlists_lock
);
974 down_interruptible(&fi
->complete_sem
);
978 if (fi
->state
== connected
) {
979 spin_lock_irq(&host_info_lock
);
981 spin_unlock_irq(&host_info_lock
);
983 hpsb_dec_host_usage(fi
->host
);
988 V22_COMPAT_MOD_DEC_USE_COUNT
;
993 static struct hpsb_highlevel_ops hl_ops
= {
995 remove_host
: remove_host
,
996 host_reset
: host_reset
,
997 iso_receive
: iso_receive
,
998 fcp_request
: fcp_request
,
1001 static struct file_operations file_ops
= {
1007 release
: dev_release
,
1010 int init_raw1394(void)
1012 hl_handle
= hpsb_register_highlevel(RAW1394_DEVICE_NAME
, &hl_ops
);
1013 if (hl_handle
== NULL
) {
1014 HPSB_ERR("raw1394 failed to register with ieee1394 highlevel");
1018 devfs_handle
= devfs_register(NULL
, RAW1394_DEVICE_NAME
, DEVFS_FL_NONE
,
1019 RAW1394_DEVICE_MAJOR
, 0,
1020 S_IFCHR
| S_IRUSR
| S_IWUSR
, &file_ops
,
1023 if (devfs_register_chrdev(RAW1394_DEVICE_MAJOR
, RAW1394_DEVICE_NAME
,
1025 HPSB_ERR("raw1394 failed to register /dev/raw1394 device");
1028 printk(KERN_INFO
"raw1394: /dev/%s device initialized\n", RAW1394_DEVICE_NAME
);
1032 void cleanup_raw1394(void)
1034 devfs_unregister_chrdev(RAW1394_DEVICE_MAJOR
, RAW1394_DEVICE_NAME
);
1035 devfs_unregister(devfs_handle
);
1036 hpsb_unregister_highlevel(hl_handle
);
/* Module entry point. */
int init_module(void)
{
        return init_raw1394();
}
/* Module exit point.  (Original used "return cleanup_raw1394();" - a
 * return with an expression is not valid ISO C in a void function.) */
void cleanup_module(void)
{
        cleanup_raw1394();
}