This is pre8 ...
[linux-2.6/linux-mips.git] / drivers / ieee1394 / raw1394.c
blob83c1169fe9a429b7fca8fae4b3b96d1b2cc6a30c
1 /*
2 * IEEE 1394 for Linux
4 * Raw interface to the bus
6 * Copyright (C) 1999, 2000 Andreas E. Bombe
8 * This code is licensed under the GPL. See the file COPYING in the root
9 * directory of the kernel sources for details.
12 #include <linux/kernel.h>
13 #include <linux/list.h>
14 #include <linux/string.h>
15 #include <linux/slab.h>
16 #include <linux/fs.h>
17 #include <linux/poll.h>
18 #include <linux/module.h>
19 #include <linux/version.h>
20 #include <asm/uaccess.h>
22 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,0)
23 #include <linux/devfs_fs_kernel.h>
24 #endif
26 #include "ieee1394.h"
27 #include "ieee1394_types.h"
28 #include "ieee1394_core.h"
29 #include "hosts.h"
30 #include "highlevel.h"
31 #include "ieee1394_transactions.h"
32 #include "raw1394.h"
35 static devfs_handle_t devfs_handle = NULL;
37 LIST_HEAD(host_info_list);
38 static int host_count = 0;
39 spinlock_t host_info_lock = SPIN_LOCK_UNLOCKED;
41 static struct hpsb_highlevel *hl_handle = NULL;
43 static atomic_t iso_buffer_size;
44 static const int iso_buffer_max = 4 * 1024 * 1024; /* 4 MB */
46 static void queue_complete_cb(struct pending_request *req);
48 static struct pending_request *__alloc_pending_request(int flags)
50 struct pending_request *req;
52 req = (struct pending_request *)kmalloc(sizeof(struct pending_request),
53 flags);
54 if (req != NULL) {
55 memset(req, 0, sizeof(struct pending_request));
56 INIT_LIST_HEAD(&req->list);
57 req->tq.routine = (void(*)(void*))queue_complete_cb;
60 return req;
63 inline static struct pending_request *alloc_pending_request(void)
65 return __alloc_pending_request(SLAB_KERNEL);
68 static void free_pending_request(struct pending_request *req)
70 if (req->ibs) {
71 if (atomic_dec_and_test(&req->ibs->refcount)) {
72 atomic_sub(req->ibs->data_size, &iso_buffer_size);
73 kfree(req->ibs);
75 } else if (req->free_data) {
76 kfree(req->data);
78 free_hpsb_packet(req->packet);
79 kfree(req);
82 static void queue_complete_req(struct pending_request *req)
84 unsigned long flags;
85 struct file_info *fi = req->file_info;
87 spin_lock_irqsave(&fi->reqlists_lock, flags);
88 list_del(&req->list);
89 list_add_tail(&req->list, &fi->req_complete);
90 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
92 up(&fi->complete_sem);
93 wake_up_interruptible(&fi->poll_wait_complete);
96 static void queue_complete_cb(struct pending_request *req)
98 struct hpsb_packet *packet = req->packet;
99 int rcode = (packet->header[1] >> 12) & 0xf;
101 switch (packet->ack_code) {
102 case ACKX_NONE:
103 case ACKX_SEND_ERROR:
104 req->req.error = RAW1394_ERROR_SEND_ERROR;
105 break;
106 case ACKX_ABORTED:
107 req->req.error = RAW1394_ERROR_ABORTED;
108 break;
109 case ACKX_TIMEOUT:
110 req->req.error = RAW1394_ERROR_TIMEOUT;
111 break;
112 default:
113 req->req.error = (packet->ack_code << 16) | rcode;
114 break;
117 if (!((packet->ack_code == ACK_PENDING) && (rcode == RCODE_COMPLETE))) {
118 req->req.length = 0;
121 free_tlabel(packet->host, packet->node_id, packet->tlabel);
123 queue_complete_req(req);
127 static void add_host(struct hpsb_host *host)
129 struct host_info *hi;
131 hi = (struct host_info *)kmalloc(sizeof(struct host_info), SLAB_KERNEL);
132 if (hi != NULL) {
133 INIT_LIST_HEAD(&hi->list);
134 hi->host = host;
135 INIT_LIST_HEAD(&hi->file_info_list);
137 spin_lock_irq(&host_info_lock);
138 list_add_tail(&hi->list, &host_info_list);
139 host_count++;
140 spin_unlock_irq(&host_info_lock);
145 static struct host_info *find_host_info(struct hpsb_host *host)
147 struct list_head *lh;
148 struct host_info *hi;
150 lh = host_info_list.next;
151 while (lh != &host_info_list) {
152 hi = list_entry(lh, struct host_info, list);
153 if (hi->host == host) {
154 return hi;
156 lh = lh->next;
159 return NULL;
162 static void remove_host(struct hpsb_host *host)
164 struct host_info *hi;
166 spin_lock_irq(&host_info_lock);
167 hi = find_host_info(host);
169 if (hi != NULL) {
170 list_del(&hi->list);
171 host_count--;
173 spin_unlock_irq(&host_info_lock);
175 if (hi == NULL) {
176 printk(KERN_ERR "raw1394: attempt to remove unknown host "
177 "0x%p\n", host);
178 return;
181 kfree(hi);
184 static void host_reset(struct hpsb_host *host)
186 unsigned long flags;
187 struct list_head *lh;
188 struct host_info *hi;
189 struct file_info *fi;
190 struct pending_request *req;
192 spin_lock_irqsave(&host_info_lock, flags);
193 hi = find_host_info(host);
195 if (hi != NULL) {
196 lh = hi->file_info_list.next;
198 while (lh != &hi->file_info_list) {
199 fi = list_entry(lh, struct file_info, list);
200 req = __alloc_pending_request(SLAB_ATOMIC);
202 if (req != NULL) {
203 req->file_info = fi;
204 req->req.type = RAW1394_REQ_BUS_RESET;
205 req->req.generation = get_hpsb_generation();
206 req->req.misc = (host->node_id << 16)
207 | host->node_count;
208 queue_complete_req(req);
211 lh = lh->next;
214 spin_unlock_irqrestore(&host_info_lock, flags);
217 static void iso_receive(struct hpsb_host *host, int channel, quadlet_t *data,
218 unsigned int length)
220 unsigned long flags;
221 struct list_head *lh;
222 struct host_info *hi;
223 struct file_info *fi;
224 struct pending_request *req;
225 struct iso_block_store *ibs = NULL;
226 LIST_HEAD(reqs);
228 if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
229 HPSB_INFO("dropped iso packet");
230 return;
233 spin_lock_irqsave(&host_info_lock, flags);
234 hi = find_host_info(host);
236 if (hi != NULL) {
237 for (lh = hi->file_info_list.next; lh != &hi->file_info_list;
238 lh = lh->next) {
239 fi = list_entry(lh, struct file_info, list);
241 if (!(fi->listen_channels & (1ULL << channel))) {
242 continue;
245 req = __alloc_pending_request(SLAB_ATOMIC);
246 if (!req) break;
248 if (!ibs) {
249 ibs = kmalloc(sizeof(struct iso_block_store)
250 + length, SLAB_ATOMIC);
251 if (!ibs) {
252 kfree(req);
253 break;
256 atomic_add(length, &iso_buffer_size);
257 atomic_set(&ibs->refcount, 0);
258 ibs->data_size = length;
259 memcpy(ibs->data, data, length);
262 atomic_inc(&ibs->refcount);
264 req->file_info = fi;
265 req->ibs = ibs;
266 req->data = ibs->data;
267 req->req.type = RAW1394_REQ_ISO_RECEIVE;
268 req->req.generation = get_hpsb_generation();
269 req->req.misc = 0;
270 req->req.recvb = (u64)fi->iso_buffer;
271 req->req.length = MIN(length, fi->iso_buffer_length);
273 list_add_tail(&req->list, &reqs);
276 spin_unlock_irqrestore(&host_info_lock, flags);
278 lh = reqs.next;
279 while (lh != &reqs) {
280 req = list_entry(lh, struct pending_request, list);
281 lh = lh->next;
282 queue_complete_req(req);
286 static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
287 int cts, u8 *data, unsigned int length)
289 unsigned long flags;
290 struct list_head *lh;
291 struct host_info *hi;
292 struct file_info *fi;
293 struct pending_request *req;
294 struct iso_block_store *ibs = NULL;
295 LIST_HEAD(reqs);
297 if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
298 HPSB_INFO("dropped fcp request");
299 return;
302 spin_lock_irqsave(&host_info_lock, flags);
303 hi = find_host_info(host);
305 if (hi != NULL) {
306 for (lh = hi->file_info_list.next; lh != &hi->file_info_list;
307 lh = lh->next) {
308 fi = list_entry(lh, struct file_info, list);
310 if (!fi->fcp_buffer) {
311 continue;
314 req = __alloc_pending_request(SLAB_ATOMIC);
315 if (!req) break;
317 if (!ibs) {
318 ibs = kmalloc(sizeof(struct iso_block_store)
319 + length, SLAB_ATOMIC);
320 if (!ibs) {
321 kfree(req);
322 break;
325 atomic_add(length, &iso_buffer_size);
326 atomic_set(&ibs->refcount, 0);
327 ibs->data_size = length;
328 memcpy(ibs->data, data, length);
331 atomic_inc(&ibs->refcount);
333 req->file_info = fi;
334 req->ibs = ibs;
335 req->data = ibs->data;
336 req->req.type = RAW1394_REQ_FCP_REQUEST;
337 req->req.generation = get_hpsb_generation();
338 req->req.misc = nodeid | (direction << 16);
339 req->req.recvb = (u64)fi->fcp_buffer;
340 req->req.length = length;
342 list_add_tail(&req->list, &reqs);
345 spin_unlock_irqrestore(&host_info_lock, flags);
347 lh = reqs.next;
348 while (lh != &reqs) {
349 req = list_entry(lh, struct pending_request, list);
350 lh = lh->next;
351 queue_complete_req(req);
356 static ssize_t dev_read(struct file *file, char *buffer, size_t count,
357 loff_t *offset_is_ignored)
359 struct file_info *fi = (struct file_info *)file->private_data;
360 struct list_head *lh;
361 struct pending_request *req;
363 if (count != sizeof(struct raw1394_request)) {
364 return -EINVAL;
367 if (!access_ok(VERIFY_WRITE, buffer, count)) {
368 return -EFAULT;
371 if (file->f_flags & O_NONBLOCK) {
372 if (down_trylock(&fi->complete_sem)) {
373 return -EAGAIN;
375 } else {
376 if (down_interruptible(&fi->complete_sem)) {
377 return -ERESTARTSYS;
381 spin_lock_irq(&fi->reqlists_lock);
382 lh = fi->req_complete.next;
383 list_del(lh);
384 spin_unlock_irq(&fi->reqlists_lock);
386 req = list_entry(lh, struct pending_request, list);
388 if (req->req.length) {
389 if (copy_to_user((void *)req->req.recvb, req->data,
390 req->req.length)) {
391 req->req.error = RAW1394_ERROR_MEMFAULT;
394 __copy_to_user(buffer, &req->req, sizeof(req->req));
396 free_pending_request(req);
397 return sizeof(struct raw1394_request);
401 static int state_opened(struct file_info *fi, struct pending_request *req)
403 if (req->req.type == RAW1394_REQ_INITIALIZE) {
404 if (req->req.misc == RAW1394_KERNELAPI_VERSION) {
405 fi->state = initialized;
406 req->req.error = RAW1394_ERROR_NONE;
407 req->req.generation = get_hpsb_generation();
408 } else {
409 req->req.error = RAW1394_ERROR_COMPAT;
410 req->req.misc = RAW1394_KERNELAPI_VERSION;
412 } else {
413 req->req.error = RAW1394_ERROR_STATE_ORDER;
416 req->req.length = 0;
417 queue_complete_req(req);
418 return sizeof(struct raw1394_request);
421 static int state_initialized(struct file_info *fi, struct pending_request *req)
423 struct list_head *lh;
424 struct host_info *hi;
425 struct raw1394_khost_list *khl;
427 if (req->req.generation != get_hpsb_generation()) {
428 req->req.error = RAW1394_ERROR_GENERATION;
429 req->req.generation = get_hpsb_generation();
430 req->req.length = 0;
431 queue_complete_req(req);
432 return sizeof(struct raw1394_request);
435 switch (req->req.type) {
436 case RAW1394_REQ_LIST_CARDS:
437 spin_lock_irq(&host_info_lock);
438 khl = kmalloc(sizeof(struct raw1394_khost_list) * host_count,
439 SLAB_ATOMIC);
441 if (khl != NULL) {
442 req->req.misc = host_count;
443 req->data = (quadlet_t *)khl;
445 lh = host_info_list.next;
446 while (lh != &host_info_list) {
447 hi = list_entry(lh, struct host_info, list);
449 khl->nodes = hi->host->node_count;
450 strcpy(khl->name, hi->host->template->name);
452 khl++;
453 lh = lh->next;
456 spin_unlock_irq(&host_info_lock);
458 if (khl != NULL) {
459 req->req.error = RAW1394_ERROR_NONE;
460 req->req.length = MIN(req->req.length,
461 sizeof(struct raw1394_khost_list)
462 * req->req.misc);
463 req->free_data = 1;
464 } else {
465 return -ENOMEM;
467 break;
469 case RAW1394_REQ_SET_CARD:
470 lh = NULL;
472 spin_lock_irq(&host_info_lock);
473 if (req->req.misc < host_count) {
474 lh = host_info_list.next;
475 while (req->req.misc--) {
476 lh = lh->next;
478 hi = list_entry(lh, struct host_info, list);
479 hpsb_inc_host_usage(hi->host);
480 list_add_tail(&fi->list, &hi->file_info_list);
481 fi->host = hi->host;
482 fi->state = connected;
484 spin_unlock_irq(&host_info_lock);
486 if (lh != NULL) {
487 req->req.error = RAW1394_ERROR_NONE;
488 req->req.misc = (fi->host->node_id << 16)
489 | fi->host->node_count;
490 } else {
491 req->req.error = RAW1394_ERROR_INVALID_ARG;
494 req->req.length = 0;
495 break;
497 default:
498 req->req.error = RAW1394_ERROR_STATE_ORDER;
499 req->req.length = 0;
500 break;
503 queue_complete_req(req);
504 return sizeof(struct raw1394_request);
507 static void handle_iso_listen(struct file_info *fi, struct pending_request *req)
509 int channel = req->req.misc;
511 spin_lock(&host_info_lock);
512 if ((channel > 63) || (channel < -64)) {
513 req->req.error = RAW1394_ERROR_INVALID_ARG;
514 } else if (channel >= 0) {
515 /* allocate channel req.misc */
516 if (fi->listen_channels & (1ULL << channel)) {
517 req->req.error = RAW1394_ERROR_ALREADY;
518 } else {
519 fi->listen_channels |= 1ULL << channel;
520 hpsb_listen_channel(hl_handle, fi->host, channel);
521 fi->iso_buffer = (void *)req->req.recvb;
522 fi->iso_buffer_length = req->req.length;
524 } else {
525 /* deallocate channel (one's complement neg) req.misc */
526 channel = ~channel;
528 if (fi->listen_channels & (1ULL << channel)) {
529 hpsb_unlisten_channel(hl_handle, fi->host, channel);
530 fi->listen_channels &= ~(1ULL << channel);
531 } else {
532 req->req.error = RAW1394_ERROR_INVALID_ARG;
536 req->req.length = 0;
537 queue_complete_req(req);
538 spin_unlock(&host_info_lock);
541 static void handle_fcp_listen(struct file_info *fi, struct pending_request *req)
543 if (req->req.misc) {
544 if (fi->fcp_buffer) {
545 req->req.error = RAW1394_ERROR_ALREADY;
546 } else {
547 fi->fcp_buffer = (u8 *)req->req.recvb;
549 } else {
550 if (!fi->fcp_buffer) {
551 req->req.error = RAW1394_ERROR_ALREADY;
552 } else {
553 fi->fcp_buffer = NULL;
557 req->req.length = 0;
558 queue_complete_req(req);
561 static int handle_local_request(struct file_info *fi,
562 struct pending_request *req, int node)
564 u64 addr = req->req.address & 0xffffffffffffULL;
566 req->data = kmalloc(req->req.length, SLAB_KERNEL);
567 if (!req->data) return -ENOMEM;
568 req->free_data = 1;
570 switch (req->req.type) {
571 case RAW1394_REQ_ASYNC_READ:
572 req->req.error = highlevel_read(fi->host, node, req->data, addr,
573 req->req.length);
574 break;
576 case RAW1394_REQ_ASYNC_WRITE:
577 if (copy_from_user(req->data, (void *)req->req.sendb,
578 req->req.length)) {
579 req->req.error = RAW1394_ERROR_MEMFAULT;
580 break;
583 req->req.error = highlevel_write(fi->host, node, req->data,
584 addr, req->req.length);
585 req->req.length = 0;
586 break;
588 case RAW1394_REQ_LOCK:
589 if ((req->req.misc == EXTCODE_FETCH_ADD)
590 || (req->req.misc == EXTCODE_LITTLE_ADD)) {
591 if (req->req.length != 4) {
592 req->req.error = RAW1394_ERROR_INVALID_ARG;
593 break;
595 } else {
596 if (req->req.length != 8) {
597 req->req.error = RAW1394_ERROR_INVALID_ARG;
598 break;
602 if (copy_from_user(req->data, (void *)req->req.sendb,
603 req->req.length)) {
604 req->req.error = RAW1394_ERROR_MEMFAULT;
605 break;
608 if (req->req.length == 8) {
609 req->req.error = highlevel_lock(fi->host, node,
610 req->data, addr,
611 req->data[1],
612 req->data[0],
613 req->req.misc);
614 req->req.length = 4;
615 } else {
616 req->req.error = highlevel_lock(fi->host, node,
617 req->data, addr,
618 req->data[0], 0,
619 req->req.misc);
621 break;
623 case RAW1394_REQ_LOCK64:
624 default:
625 req->req.error = RAW1394_ERROR_STATE_ORDER;
628 if (req->req.error) req->req.length = 0;
629 req->req.error |= 0x00100000;
630 queue_complete_req(req);
631 return sizeof(struct raw1394_request);
634 static int handle_remote_request(struct file_info *fi,
635 struct pending_request *req, int node)
637 struct hpsb_packet *packet = NULL;
638 u64 addr = req->req.address & 0xffffffffffffULL;
640 switch (req->req.type) {
641 case RAW1394_REQ_ASYNC_READ:
642 if (req->req.length == 4) {
643 packet = hpsb_make_readqpacket(fi->host, node, addr);
644 if (!packet) return -ENOMEM;
646 req->data = &packet->header[3];
647 } else {
648 packet = hpsb_make_readbpacket(fi->host, node, addr,
649 req->req.length);
650 if (!packet) return -ENOMEM;
652 req->data = packet->data;
654 break;
656 case RAW1394_REQ_ASYNC_WRITE:
657 if (req->req.length == 4) {
658 quadlet_t x;
660 if (copy_from_user(&x, (void *)req->req.sendb, 4)) {
661 req->req.error = RAW1394_ERROR_MEMFAULT;
664 packet = hpsb_make_writeqpacket(fi->host, node, addr,
666 if (!packet) return -ENOMEM;
667 } else {
668 packet = hpsb_make_writebpacket(fi->host, node, addr,
669 req->req.length);
670 if (!packet) return -ENOMEM;
672 if (copy_from_user(packet->data, (void *)req->req.sendb,
673 req->req.length)) {
674 req->req.error = RAW1394_ERROR_MEMFAULT;
677 req->req.length = 0;
678 break;
680 case RAW1394_REQ_LOCK:
681 if ((req->req.misc == EXTCODE_FETCH_ADD)
682 || (req->req.misc == EXTCODE_LITTLE_ADD)) {
683 if (req->req.length != 4) {
684 req->req.error = RAW1394_ERROR_INVALID_ARG;
685 break;
687 } else {
688 if (req->req.length != 8) {
689 req->req.error = RAW1394_ERROR_INVALID_ARG;
690 break;
694 packet = hpsb_make_lockpacket(fi->host, node, addr,
695 req->req.misc);
696 if (!packet) return -ENOMEM;
698 if (copy_from_user(packet->data, (void *)req->req.sendb,
699 req->req.length)) {
700 req->req.error = RAW1394_ERROR_MEMFAULT;
701 break;
704 req->data = packet->data;
705 req->req.length = 4;
706 break;
708 case RAW1394_REQ_LOCK64:
709 default:
710 req->req.error = RAW1394_ERROR_STATE_ORDER;
713 req->packet = packet;
715 if (req->req.error) {
716 req->req.length = 0;
717 queue_complete_req(req);
718 return sizeof(struct raw1394_request);
721 req->tq.data = req;
722 queue_task(&req->tq, &packet->complete_tq);
724 spin_lock_irq(&fi->reqlists_lock);
725 list_add_tail(&req->list, &fi->req_pending);
726 spin_unlock_irq(&fi->reqlists_lock);
728 if (!hpsb_send_packet(packet)) {
729 req->req.error = RAW1394_ERROR_SEND_ERROR;
730 req->req.length = 0;
731 free_tlabel(packet->host, packet->node_id, packet->tlabel);
732 queue_complete_req(req);
734 return sizeof(struct raw1394_request);
737 static int state_connected(struct file_info *fi, struct pending_request *req)
739 int node = req->req.address >> 48;
741 req->req.error = RAW1394_ERROR_NONE;
743 if (req->req.generation != get_hpsb_generation()) {
744 req->req.error = RAW1394_ERROR_GENERATION;
745 req->req.generation = get_hpsb_generation();
746 req->req.length = 0;
747 queue_complete_req(req);
748 return sizeof(struct raw1394_request);
751 if (req->req.type == RAW1394_REQ_ISO_LISTEN) {
752 handle_iso_listen(fi, req);
753 return sizeof(struct raw1394_request);
756 if (req->req.type == RAW1394_REQ_FCP_LISTEN) {
757 handle_fcp_listen(fi, req);
758 return sizeof(struct raw1394_request);
761 if (req->req.length == 0) {
762 req->req.error = RAW1394_ERROR_INVALID_ARG;
763 queue_complete_req(req);
764 return sizeof(struct raw1394_request);
767 if (fi->host->node_id == node) {
768 return handle_local_request(fi, req, node);
771 return handle_remote_request(fi, req, node);
775 static ssize_t dev_write(struct file *file, const char *buffer, size_t count,
776 loff_t *offset_is_ignored)
778 struct file_info *fi = (struct file_info *)file->private_data;
779 struct pending_request *req;
780 ssize_t retval = 0;
782 if (count != sizeof(struct raw1394_request)) {
783 return -EINVAL;
786 req = alloc_pending_request();
787 if (req == NULL) {
788 return -ENOMEM;
790 req->file_info = fi;
792 if (copy_from_user(&req->req, buffer, sizeof(struct raw1394_request))) {
793 free_pending_request(req);
794 return -EFAULT;
797 switch (fi->state) {
798 case opened:
799 retval = state_opened(fi, req);
800 break;
802 case initialized:
803 retval = state_initialized(fi, req);
804 break;
806 case connected:
807 retval = state_connected(fi, req);
808 break;
811 if (retval < 0) {
812 free_pending_request(req);
815 return retval;
818 static unsigned int dev_poll(struct file *file, poll_table *pt)
820 struct file_info *fi = file->private_data;
821 unsigned int mask = POLLOUT | POLLWRNORM;
823 poll_wait(file, &fi->poll_wait_complete, pt);
825 spin_lock_irq(&fi->reqlists_lock);
826 if (!list_empty(&fi->req_complete)) {
827 mask |= POLLIN | POLLRDNORM;
829 spin_unlock_irq(&fi->reqlists_lock);
831 return mask;
834 static int dev_open(struct inode *inode, struct file *file)
836 struct file_info *fi;
838 if (MINOR(inode->i_rdev)) {
839 return -ENXIO;
842 V22_COMPAT_MOD_INC_USE_COUNT;
844 fi = kmalloc(sizeof(struct file_info), SLAB_KERNEL);
845 if (fi == NULL) {
846 V22_COMPAT_MOD_DEC_USE_COUNT;
847 return -ENOMEM;
850 memset(fi, 0, sizeof(struct file_info));
852 INIT_LIST_HEAD(&fi->list);
853 fi->state = opened;
854 INIT_LIST_HEAD(&fi->req_pending);
855 INIT_LIST_HEAD(&fi->req_complete);
856 sema_init(&fi->complete_sem, 0);
857 spin_lock_init(&fi->reqlists_lock);
858 init_waitqueue_head(&fi->poll_wait_complete);
860 file->private_data = fi;
862 return 0;
865 static int dev_release(struct inode *inode, struct file *file)
867 struct file_info *fi = file->private_data;
868 struct list_head *lh;
869 struct pending_request *req;
870 int done = 0, i;
872 for (i = 0; i < 64; i++) {
873 if (fi->listen_channels & (1ULL << i)) {
874 hpsb_unlisten_channel(hl_handle, fi->host, i);
878 spin_lock(&host_info_lock);
879 fi->listen_channels = 0;
880 spin_unlock(&host_info_lock);
882 while (!done) {
883 spin_lock_irq(&fi->reqlists_lock);
885 while (!list_empty(&fi->req_complete)) {
886 lh = fi->req_complete.next;
887 list_del(lh);
889 req = list_entry(lh, struct pending_request, list);
891 free_pending_request(req);
894 if (list_empty(&fi->req_pending)) {
895 done = 1;
898 spin_unlock_irq(&fi->reqlists_lock);
900 if (!done) {
901 down_interruptible(&fi->complete_sem);
905 if (fi->state == connected) {
906 spin_lock_irq(&host_info_lock);
907 list_del(&fi->list);
908 spin_unlock_irq(&host_info_lock);
910 hpsb_dec_host_usage(fi->host);
913 kfree(fi);
915 V22_COMPAT_MOD_DEC_USE_COUNT;
916 return 0;
919 static struct hpsb_highlevel_ops hl_ops = {
920 add_host: add_host,
921 remove_host: remove_host,
922 host_reset: host_reset,
923 iso_receive: iso_receive,
924 fcp_request: fcp_request,
927 static struct file_operations file_ops = {
928 OWNER_THIS_MODULE
929 read: dev_read,
930 write: dev_write,
931 poll: dev_poll,
932 open: dev_open,
933 release: dev_release,
936 int init_raw1394(void)
938 hl_handle = hpsb_register_highlevel(RAW1394_DEVICE_NAME, &hl_ops);
939 if (hl_handle == NULL) {
940 HPSB_ERR("raw1394 failed to register with ieee1394 highlevel");
941 return -ENOMEM;
944 devfs_handle = devfs_register(NULL, RAW1394_DEVICE_NAME, DEVFS_FL_NONE,
945 RAW1394_DEVICE_MAJOR, 0,
946 S_IFCHR | S_IRUSR | S_IWUSR, &file_ops,
947 NULL);
949 if (devfs_register_chrdev(RAW1394_DEVICE_MAJOR, RAW1394_DEVICE_NAME,
950 &file_ops)) {
951 HPSB_ERR("raw1394 failed to register /dev/raw1394 device");
952 return -EBUSY;
954 printk(KERN_INFO "raw1394: /dev/%s device initialized\n", RAW1394_DEVICE_NAME);
955 return 0;
958 void cleanup_raw1394(void)
960 devfs_unregister_chrdev(RAW1394_DEVICE_MAJOR, RAW1394_DEVICE_NAME);
961 devfs_unregister(devfs_handle);
962 hpsb_unregister_highlevel(hl_handle);
#ifdef MODULE

/* Standard module entry point. */
int init_module(void)
{
        return init_raw1394();
}

/* Standard module exit point.
 * Fix: the original wrote `return cleanup_raw1394();` — returning a
 * void expression from a void function is a constraint violation in
 * ISO C; just call it. */
void cleanup_module(void)
{
        cleanup_raw1394();
}

#endif