/*
 * IEEE 1394 for Linux
 *
 * Raw interface to the bus
 *
 * Copyright (C) 1999, 2000 Andreas E. Bombe
 *
 * This code is licensed under the GPL.  See the file COPYING in the root
 * directory of the kernel sources for details.
 */
12 #include <linux/kernel.h>
13 #include <linux/list.h>
14 #include <linux/string.h>
15 #include <linux/slab.h>
17 #include <linux/poll.h>
18 #include <linux/module.h>
19 #include <linux/version.h>
20 #include <asm/uaccess.h>
22 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,0)
23 #include <linux/devfs_fs_kernel.h>
27 #include "ieee1394_types.h"
28 #include "ieee1394_core.h"
30 #include "highlevel.h"
31 #include "ieee1394_transactions.h"
35 static devfs_handle_t devfs_handle
= NULL
;
37 LIST_HEAD(host_info_list
);
38 static int host_count
= 0;
39 spinlock_t host_info_lock
= SPIN_LOCK_UNLOCKED
;
41 static struct hpsb_highlevel
*hl_handle
= NULL
;
43 static atomic_t iso_buffer_size
;
44 static const int iso_buffer_max
= 4 * 1024 * 1024; /* 4 MB */
46 static void queue_complete_cb(struct pending_request
*req
);
48 static struct pending_request
*__alloc_pending_request(int flags
)
50 struct pending_request
*req
;
52 req
= (struct pending_request
*)kmalloc(sizeof(struct pending_request
),
55 memset(req
, 0, sizeof(struct pending_request
));
56 INIT_LIST_HEAD(&req
->list
);
57 req
->tq
.routine
= (void(*)(void*))queue_complete_cb
;
63 inline static struct pending_request
*alloc_pending_request(void)
65 return __alloc_pending_request(SLAB_KERNEL
);
68 static void free_pending_request(struct pending_request
*req
)
71 if (atomic_dec_and_test(&req
->ibs
->refcount
)) {
72 atomic_sub(req
->ibs
->data_size
, &iso_buffer_size
);
75 } else if (req
->free_data
) {
78 free_hpsb_packet(req
->packet
);
82 static void queue_complete_req(struct pending_request
*req
)
85 struct file_info
*fi
= req
->file_info
;
87 spin_lock_irqsave(&fi
->reqlists_lock
, flags
);
89 list_add_tail(&req
->list
, &fi
->req_complete
);
90 spin_unlock_irqrestore(&fi
->reqlists_lock
, flags
);
92 up(&fi
->complete_sem
);
93 wake_up_interruptible(&fi
->poll_wait_complete
);
96 static void queue_complete_cb(struct pending_request
*req
)
98 struct hpsb_packet
*packet
= req
->packet
;
99 int rcode
= (packet
->header
[1] >> 12) & 0xf;
101 switch (packet
->ack_code
) {
103 case ACKX_SEND_ERROR
:
104 req
->req
.error
= RAW1394_ERROR_SEND_ERROR
;
107 req
->req
.error
= RAW1394_ERROR_ABORTED
;
110 req
->req
.error
= RAW1394_ERROR_TIMEOUT
;
113 req
->req
.error
= (packet
->ack_code
<< 16) | rcode
;
117 if (!((packet
->ack_code
== ACK_PENDING
) && (rcode
== RCODE_COMPLETE
))) {
121 free_tlabel(packet
->host
, packet
->node_id
, packet
->tlabel
);
123 queue_complete_req(req
);
127 static void add_host(struct hpsb_host
*host
)
129 struct host_info
*hi
;
131 hi
= (struct host_info
*)kmalloc(sizeof(struct host_info
), SLAB_KERNEL
);
133 INIT_LIST_HEAD(&hi
->list
);
135 INIT_LIST_HEAD(&hi
->file_info_list
);
137 spin_lock_irq(&host_info_lock
);
138 list_add_tail(&hi
->list
, &host_info_list
);
140 spin_unlock_irq(&host_info_lock
);
145 static struct host_info
*find_host_info(struct hpsb_host
*host
)
147 struct list_head
*lh
;
148 struct host_info
*hi
;
150 lh
= host_info_list
.next
;
151 while (lh
!= &host_info_list
) {
152 hi
= list_entry(lh
, struct host_info
, list
);
153 if (hi
->host
== host
) {
162 static void remove_host(struct hpsb_host
*host
)
164 struct host_info
*hi
;
166 spin_lock_irq(&host_info_lock
);
167 hi
= find_host_info(host
);
173 spin_unlock_irq(&host_info_lock
);
176 printk(KERN_ERR
"raw1394: attempt to remove unknown host "
184 static void host_reset(struct hpsb_host
*host
)
187 struct list_head
*lh
;
188 struct host_info
*hi
;
189 struct file_info
*fi
;
190 struct pending_request
*req
;
192 spin_lock_irqsave(&host_info_lock
, flags
);
193 hi
= find_host_info(host
);
196 lh
= hi
->file_info_list
.next
;
198 while (lh
!= &hi
->file_info_list
) {
199 fi
= list_entry(lh
, struct file_info
, list
);
200 req
= __alloc_pending_request(SLAB_ATOMIC
);
204 req
->req
.type
= RAW1394_REQ_BUS_RESET
;
205 req
->req
.generation
= get_hpsb_generation();
206 req
->req
.misc
= (host
->node_id
<< 16)
208 queue_complete_req(req
);
214 spin_unlock_irqrestore(&host_info_lock
, flags
);
217 static void iso_receive(struct hpsb_host
*host
, int channel
, quadlet_t
*data
,
221 struct list_head
*lh
;
222 struct host_info
*hi
;
223 struct file_info
*fi
;
224 struct pending_request
*req
;
225 struct iso_block_store
*ibs
= NULL
;
228 if ((atomic_read(&iso_buffer_size
) + length
) > iso_buffer_max
) {
229 HPSB_INFO("dropped iso packet");
233 spin_lock_irqsave(&host_info_lock
, flags
);
234 hi
= find_host_info(host
);
237 for (lh
= hi
->file_info_list
.next
; lh
!= &hi
->file_info_list
;
239 fi
= list_entry(lh
, struct file_info
, list
);
241 if (!(fi
->listen_channels
& (1ULL << channel
))) {
245 req
= __alloc_pending_request(SLAB_ATOMIC
);
249 ibs
= kmalloc(sizeof(struct iso_block_store
)
250 + length
, SLAB_ATOMIC
);
256 atomic_add(length
, &iso_buffer_size
);
257 atomic_set(&ibs
->refcount
, 0);
258 ibs
->data_size
= length
;
259 memcpy(ibs
->data
, data
, length
);
262 atomic_inc(&ibs
->refcount
);
266 req
->data
= ibs
->data
;
267 req
->req
.type
= RAW1394_REQ_ISO_RECEIVE
;
268 req
->req
.generation
= get_hpsb_generation();
270 req
->req
.recvb
= (u64
)fi
->iso_buffer
;
271 req
->req
.length
= MIN(length
, fi
->iso_buffer_length
);
273 list_add_tail(&req
->list
, &reqs
);
276 spin_unlock_irqrestore(&host_info_lock
, flags
);
279 while (lh
!= &reqs
) {
280 req
= list_entry(lh
, struct pending_request
, list
);
282 queue_complete_req(req
);
286 static void fcp_request(struct hpsb_host
*host
, int nodeid
, int direction
,
287 int cts
, u8
*data
, unsigned int length
)
290 struct list_head
*lh
;
291 struct host_info
*hi
;
292 struct file_info
*fi
;
293 struct pending_request
*req
;
294 struct iso_block_store
*ibs
= NULL
;
297 if ((atomic_read(&iso_buffer_size
) + length
) > iso_buffer_max
) {
298 HPSB_INFO("dropped fcp request");
302 spin_lock_irqsave(&host_info_lock
, flags
);
303 hi
= find_host_info(host
);
306 for (lh
= hi
->file_info_list
.next
; lh
!= &hi
->file_info_list
;
308 fi
= list_entry(lh
, struct file_info
, list
);
310 if (!fi
->fcp_buffer
) {
314 req
= __alloc_pending_request(SLAB_ATOMIC
);
318 ibs
= kmalloc(sizeof(struct iso_block_store
)
319 + length
, SLAB_ATOMIC
);
325 atomic_add(length
, &iso_buffer_size
);
326 atomic_set(&ibs
->refcount
, 0);
327 ibs
->data_size
= length
;
328 memcpy(ibs
->data
, data
, length
);
331 atomic_inc(&ibs
->refcount
);
335 req
->data
= ibs
->data
;
336 req
->req
.type
= RAW1394_REQ_FCP_REQUEST
;
337 req
->req
.generation
= get_hpsb_generation();
338 req
->req
.misc
= nodeid
| (direction
<< 16);
339 req
->req
.recvb
= (u64
)fi
->fcp_buffer
;
340 req
->req
.length
= length
;
342 list_add_tail(&req
->list
, &reqs
);
345 spin_unlock_irqrestore(&host_info_lock
, flags
);
348 while (lh
!= &reqs
) {
349 req
= list_entry(lh
, struct pending_request
, list
);
351 queue_complete_req(req
);
356 static ssize_t
dev_read(struct file
*file
, char *buffer
, size_t count
,
357 loff_t
*offset_is_ignored
)
359 struct file_info
*fi
= (struct file_info
*)file
->private_data
;
360 struct list_head
*lh
;
361 struct pending_request
*req
;
363 if (count
!= sizeof(struct raw1394_request
)) {
367 if (!access_ok(VERIFY_WRITE
, buffer
, count
)) {
371 if (file
->f_flags
& O_NONBLOCK
) {
372 if (down_trylock(&fi
->complete_sem
)) {
376 if (down_interruptible(&fi
->complete_sem
)) {
381 spin_lock_irq(&fi
->reqlists_lock
);
382 lh
= fi
->req_complete
.next
;
384 spin_unlock_irq(&fi
->reqlists_lock
);
386 req
= list_entry(lh
, struct pending_request
, list
);
388 if (req
->req
.length
) {
389 if (copy_to_user((void *)req
->req
.recvb
, req
->data
,
391 req
->req
.error
= RAW1394_ERROR_MEMFAULT
;
394 __copy_to_user(buffer
, &req
->req
, sizeof(req
->req
));
396 free_pending_request(req
);
397 return sizeof(struct raw1394_request
);
401 static int state_opened(struct file_info
*fi
, struct pending_request
*req
)
403 if (req
->req
.type
== RAW1394_REQ_INITIALIZE
) {
404 if (req
->req
.misc
== RAW1394_KERNELAPI_VERSION
) {
405 fi
->state
= initialized
;
406 req
->req
.error
= RAW1394_ERROR_NONE
;
407 req
->req
.generation
= get_hpsb_generation();
409 req
->req
.error
= RAW1394_ERROR_COMPAT
;
410 req
->req
.misc
= RAW1394_KERNELAPI_VERSION
;
413 req
->req
.error
= RAW1394_ERROR_STATE_ORDER
;
417 queue_complete_req(req
);
418 return sizeof(struct raw1394_request
);
421 static int state_initialized(struct file_info
*fi
, struct pending_request
*req
)
423 struct list_head
*lh
;
424 struct host_info
*hi
;
425 struct raw1394_khost_list
*khl
;
427 if (req
->req
.generation
!= get_hpsb_generation()) {
428 req
->req
.error
= RAW1394_ERROR_GENERATION
;
429 req
->req
.generation
= get_hpsb_generation();
431 queue_complete_req(req
);
432 return sizeof(struct raw1394_request
);
435 switch (req
->req
.type
) {
436 case RAW1394_REQ_LIST_CARDS
:
437 spin_lock_irq(&host_info_lock
);
438 khl
= kmalloc(sizeof(struct raw1394_khost_list
) * host_count
,
442 req
->req
.misc
= host_count
;
443 req
->data
= (quadlet_t
*)khl
;
445 lh
= host_info_list
.next
;
446 while (lh
!= &host_info_list
) {
447 hi
= list_entry(lh
, struct host_info
, list
);
449 khl
->nodes
= hi
->host
->node_count
;
450 strcpy(khl
->name
, hi
->host
->template->name
);
456 spin_unlock_irq(&host_info_lock
);
459 req
->req
.error
= RAW1394_ERROR_NONE
;
460 req
->req
.length
= MIN(req
->req
.length
,
461 sizeof(struct raw1394_khost_list
)
469 case RAW1394_REQ_SET_CARD
:
472 spin_lock_irq(&host_info_lock
);
473 if (req
->req
.misc
< host_count
) {
474 lh
= host_info_list
.next
;
475 while (req
->req
.misc
--) {
478 hi
= list_entry(lh
, struct host_info
, list
);
479 hpsb_inc_host_usage(hi
->host
);
480 list_add_tail(&fi
->list
, &hi
->file_info_list
);
482 fi
->state
= connected
;
484 spin_unlock_irq(&host_info_lock
);
487 req
->req
.error
= RAW1394_ERROR_NONE
;
488 req
->req
.misc
= (fi
->host
->node_id
<< 16)
489 | fi
->host
->node_count
;
491 req
->req
.error
= RAW1394_ERROR_INVALID_ARG
;
498 req
->req
.error
= RAW1394_ERROR_STATE_ORDER
;
503 queue_complete_req(req
);
504 return sizeof(struct raw1394_request
);
507 static void handle_iso_listen(struct file_info
*fi
, struct pending_request
*req
)
509 int channel
= req
->req
.misc
;
511 spin_lock(&host_info_lock
);
512 if ((channel
> 63) || (channel
< -64)) {
513 req
->req
.error
= RAW1394_ERROR_INVALID_ARG
;
514 } else if (channel
>= 0) {
515 /* allocate channel req.misc */
516 if (fi
->listen_channels
& (1ULL << channel
)) {
517 req
->req
.error
= RAW1394_ERROR_ALREADY
;
519 fi
->listen_channels
|= 1ULL << channel
;
520 hpsb_listen_channel(hl_handle
, fi
->host
, channel
);
521 fi
->iso_buffer
= (void *)req
->req
.recvb
;
522 fi
->iso_buffer_length
= req
->req
.length
;
525 /* deallocate channel (one's complement neg) req.misc */
528 if (fi
->listen_channels
& (1ULL << channel
)) {
529 hpsb_unlisten_channel(hl_handle
, fi
->host
, channel
);
530 fi
->listen_channels
&= ~(1ULL << channel
);
532 req
->req
.error
= RAW1394_ERROR_INVALID_ARG
;
537 queue_complete_req(req
);
538 spin_unlock(&host_info_lock
);
541 static void handle_fcp_listen(struct file_info
*fi
, struct pending_request
*req
)
544 if (fi
->fcp_buffer
) {
545 req
->req
.error
= RAW1394_ERROR_ALREADY
;
547 fi
->fcp_buffer
= (u8
*)req
->req
.recvb
;
550 if (!fi
->fcp_buffer
) {
551 req
->req
.error
= RAW1394_ERROR_ALREADY
;
553 fi
->fcp_buffer
= NULL
;
558 queue_complete_req(req
);
561 static int handle_local_request(struct file_info
*fi
,
562 struct pending_request
*req
, int node
)
564 u64 addr
= req
->req
.address
& 0xffffffffffffULL
;
566 req
->data
= kmalloc(req
->req
.length
, SLAB_KERNEL
);
567 if (!req
->data
) return -ENOMEM
;
570 switch (req
->req
.type
) {
571 case RAW1394_REQ_ASYNC_READ
:
572 req
->req
.error
= highlevel_read(fi
->host
, node
, req
->data
, addr
,
576 case RAW1394_REQ_ASYNC_WRITE
:
577 if (copy_from_user(req
->data
, (void *)req
->req
.sendb
,
579 req
->req
.error
= RAW1394_ERROR_MEMFAULT
;
583 req
->req
.error
= highlevel_write(fi
->host
, node
, req
->data
,
584 addr
, req
->req
.length
);
588 case RAW1394_REQ_LOCK
:
589 if ((req
->req
.misc
== EXTCODE_FETCH_ADD
)
590 || (req
->req
.misc
== EXTCODE_LITTLE_ADD
)) {
591 if (req
->req
.length
!= 4) {
592 req
->req
.error
= RAW1394_ERROR_INVALID_ARG
;
596 if (req
->req
.length
!= 8) {
597 req
->req
.error
= RAW1394_ERROR_INVALID_ARG
;
602 if (copy_from_user(req
->data
, (void *)req
->req
.sendb
,
604 req
->req
.error
= RAW1394_ERROR_MEMFAULT
;
608 if (req
->req
.length
== 8) {
609 req
->req
.error
= highlevel_lock(fi
->host
, node
,
616 req
->req
.error
= highlevel_lock(fi
->host
, node
,
623 case RAW1394_REQ_LOCK64
:
625 req
->req
.error
= RAW1394_ERROR_STATE_ORDER
;
628 if (req
->req
.error
) req
->req
.length
= 0;
629 req
->req
.error
|= 0x00100000;
630 queue_complete_req(req
);
631 return sizeof(struct raw1394_request
);
634 static int handle_remote_request(struct file_info
*fi
,
635 struct pending_request
*req
, int node
)
637 struct hpsb_packet
*packet
= NULL
;
638 u64 addr
= req
->req
.address
& 0xffffffffffffULL
;
640 switch (req
->req
.type
) {
641 case RAW1394_REQ_ASYNC_READ
:
642 if (req
->req
.length
== 4) {
643 packet
= hpsb_make_readqpacket(fi
->host
, node
, addr
);
644 if (!packet
) return -ENOMEM
;
646 req
->data
= &packet
->header
[3];
648 packet
= hpsb_make_readbpacket(fi
->host
, node
, addr
,
650 if (!packet
) return -ENOMEM
;
652 req
->data
= packet
->data
;
656 case RAW1394_REQ_ASYNC_WRITE
:
657 if (req
->req
.length
== 4) {
660 if (copy_from_user(&x
, (void *)req
->req
.sendb
, 4)) {
661 req
->req
.error
= RAW1394_ERROR_MEMFAULT
;
664 packet
= hpsb_make_writeqpacket(fi
->host
, node
, addr
,
666 if (!packet
) return -ENOMEM
;
668 packet
= hpsb_make_writebpacket(fi
->host
, node
, addr
,
670 if (!packet
) return -ENOMEM
;
672 if (copy_from_user(packet
->data
, (void *)req
->req
.sendb
,
674 req
->req
.error
= RAW1394_ERROR_MEMFAULT
;
680 case RAW1394_REQ_LOCK
:
681 if ((req
->req
.misc
== EXTCODE_FETCH_ADD
)
682 || (req
->req
.misc
== EXTCODE_LITTLE_ADD
)) {
683 if (req
->req
.length
!= 4) {
684 req
->req
.error
= RAW1394_ERROR_INVALID_ARG
;
688 if (req
->req
.length
!= 8) {
689 req
->req
.error
= RAW1394_ERROR_INVALID_ARG
;
694 packet
= hpsb_make_lockpacket(fi
->host
, node
, addr
,
696 if (!packet
) return -ENOMEM
;
698 if (copy_from_user(packet
->data
, (void *)req
->req
.sendb
,
700 req
->req
.error
= RAW1394_ERROR_MEMFAULT
;
704 req
->data
= packet
->data
;
708 case RAW1394_REQ_LOCK64
:
710 req
->req
.error
= RAW1394_ERROR_STATE_ORDER
;
713 req
->packet
= packet
;
715 if (req
->req
.error
) {
717 queue_complete_req(req
);
718 return sizeof(struct raw1394_request
);
722 queue_task(&req
->tq
, &packet
->complete_tq
);
724 spin_lock_irq(&fi
->reqlists_lock
);
725 list_add_tail(&req
->list
, &fi
->req_pending
);
726 spin_unlock_irq(&fi
->reqlists_lock
);
728 if (!hpsb_send_packet(packet
)) {
729 req
->req
.error
= RAW1394_ERROR_SEND_ERROR
;
731 free_tlabel(packet
->host
, packet
->node_id
, packet
->tlabel
);
732 queue_complete_req(req
);
734 return sizeof(struct raw1394_request
);
737 static int state_connected(struct file_info
*fi
, struct pending_request
*req
)
739 int node
= req
->req
.address
>> 48;
741 req
->req
.error
= RAW1394_ERROR_NONE
;
743 if (req
->req
.generation
!= get_hpsb_generation()) {
744 req
->req
.error
= RAW1394_ERROR_GENERATION
;
745 req
->req
.generation
= get_hpsb_generation();
747 queue_complete_req(req
);
748 return sizeof(struct raw1394_request
);
751 if (req
->req
.type
== RAW1394_REQ_ISO_LISTEN
) {
752 handle_iso_listen(fi
, req
);
753 return sizeof(struct raw1394_request
);
756 if (req
->req
.type
== RAW1394_REQ_FCP_LISTEN
) {
757 handle_fcp_listen(fi
, req
);
758 return sizeof(struct raw1394_request
);
761 if (req
->req
.length
== 0) {
762 req
->req
.error
= RAW1394_ERROR_INVALID_ARG
;
763 queue_complete_req(req
);
764 return sizeof(struct raw1394_request
);
767 if (fi
->host
->node_id
== node
) {
768 return handle_local_request(fi
, req
, node
);
771 return handle_remote_request(fi
, req
, node
);
775 static ssize_t
dev_write(struct file
*file
, const char *buffer
, size_t count
,
776 loff_t
*offset_is_ignored
)
778 struct file_info
*fi
= (struct file_info
*)file
->private_data
;
779 struct pending_request
*req
;
782 if (count
!= sizeof(struct raw1394_request
)) {
786 req
= alloc_pending_request();
792 if (copy_from_user(&req
->req
, buffer
, sizeof(struct raw1394_request
))) {
793 free_pending_request(req
);
799 retval
= state_opened(fi
, req
);
803 retval
= state_initialized(fi
, req
);
807 retval
= state_connected(fi
, req
);
812 free_pending_request(req
);
818 static unsigned int dev_poll(struct file
*file
, poll_table
*pt
)
820 struct file_info
*fi
= file
->private_data
;
821 unsigned int mask
= POLLOUT
| POLLWRNORM
;
823 poll_wait(file
, &fi
->poll_wait_complete
, pt
);
825 spin_lock_irq(&fi
->reqlists_lock
);
826 if (!list_empty(&fi
->req_complete
)) {
827 mask
|= POLLIN
| POLLRDNORM
;
829 spin_unlock_irq(&fi
->reqlists_lock
);
834 static int dev_open(struct inode
*inode
, struct file
*file
)
836 struct file_info
*fi
;
838 if (MINOR(inode
->i_rdev
)) {
842 V22_COMPAT_MOD_INC_USE_COUNT
;
844 fi
= kmalloc(sizeof(struct file_info
), SLAB_KERNEL
);
846 V22_COMPAT_MOD_DEC_USE_COUNT
;
850 memset(fi
, 0, sizeof(struct file_info
));
852 INIT_LIST_HEAD(&fi
->list
);
854 INIT_LIST_HEAD(&fi
->req_pending
);
855 INIT_LIST_HEAD(&fi
->req_complete
);
856 sema_init(&fi
->complete_sem
, 0);
857 spin_lock_init(&fi
->reqlists_lock
);
858 init_waitqueue_head(&fi
->poll_wait_complete
);
860 file
->private_data
= fi
;
865 static int dev_release(struct inode
*inode
, struct file
*file
)
867 struct file_info
*fi
= file
->private_data
;
868 struct list_head
*lh
;
869 struct pending_request
*req
;
872 for (i
= 0; i
< 64; i
++) {
873 if (fi
->listen_channels
& (1ULL << i
)) {
874 hpsb_unlisten_channel(hl_handle
, fi
->host
, i
);
878 spin_lock(&host_info_lock
);
879 fi
->listen_channels
= 0;
880 spin_unlock(&host_info_lock
);
883 spin_lock_irq(&fi
->reqlists_lock
);
885 while (!list_empty(&fi
->req_complete
)) {
886 lh
= fi
->req_complete
.next
;
889 req
= list_entry(lh
, struct pending_request
, list
);
891 free_pending_request(req
);
894 if (list_empty(&fi
->req_pending
)) {
898 spin_unlock_irq(&fi
->reqlists_lock
);
901 down_interruptible(&fi
->complete_sem
);
905 if (fi
->state
== connected
) {
906 spin_lock_irq(&host_info_lock
);
908 spin_unlock_irq(&host_info_lock
);
910 hpsb_dec_host_usage(fi
->host
);
915 V22_COMPAT_MOD_DEC_USE_COUNT
;
919 static struct hpsb_highlevel_ops hl_ops
= {
921 remove_host
: remove_host
,
922 host_reset
: host_reset
,
923 iso_receive
: iso_receive
,
924 fcp_request
: fcp_request
,
927 static struct file_operations file_ops
= {
933 release
: dev_release
,
936 int init_raw1394(void)
938 hl_handle
= hpsb_register_highlevel(RAW1394_DEVICE_NAME
, &hl_ops
);
939 if (hl_handle
== NULL
) {
940 HPSB_ERR("raw1394 failed to register with ieee1394 highlevel");
944 devfs_handle
= devfs_register(NULL
, RAW1394_DEVICE_NAME
, DEVFS_FL_NONE
,
945 RAW1394_DEVICE_MAJOR
, 0,
946 S_IFCHR
| S_IRUSR
| S_IWUSR
, &file_ops
,
949 if (devfs_register_chrdev(RAW1394_DEVICE_MAJOR
, RAW1394_DEVICE_NAME
,
951 HPSB_ERR("raw1394 failed to register /dev/raw1394 device");
954 printk(KERN_INFO
"raw1394: /dev/%s device initialized\n", RAW1394_DEVICE_NAME
);
958 void cleanup_raw1394(void)
960 devfs_unregister_chrdev(RAW1394_DEVICE_MAJOR
, RAW1394_DEVICE_NAME
);
961 devfs_unregister(devfs_handle
);
962 hpsb_unregister_highlevel(hl_handle
);
/* Classic (pre-module_init) module entry points. */
int init_module(void)
{
        return init_raw1394();
}

void cleanup_module(void)
{
        /* cleanup_raw1394() returns void; `return <void expr>;` is a
         * GCC-tolerated constraint violation in C — drop the return. */
        cleanup_raw1394();
}