- Linus: more PageDirty / swapcache handling
[davej-history.git] / drivers / ieee1394 / raw1394.c
blob16399b347e1d8786034c78709ce55baa8fe7e469
1 /*
2 * IEEE 1394 for Linux
4 * Raw interface to the bus
6 * Copyright (C) 1999, 2000 Andreas E. Bombe
8 * This code is licensed under the GPL. See the file COPYING in the root
9 * directory of the kernel sources for details.
12 #include <linux/kernel.h>
13 #include <linux/list.h>
14 #include <linux/string.h>
15 #include <linux/slab.h>
16 #include <linux/fs.h>
17 #include <linux/poll.h>
18 #include <linux/module.h>
19 #include <linux/version.h>
20 #include <linux/smp_lock.h>
21 #include <asm/uaccess.h>
23 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,0)
24 #include <linux/devfs_fs_kernel.h>
25 #endif
27 #include "ieee1394.h"
28 #include "ieee1394_types.h"
29 #include "ieee1394_core.h"
30 #include "hosts.h"
31 #include "highlevel.h"
32 #include "ieee1394_transactions.h"
33 #include "raw1394.h"
/*
 * Convert between the 64-bit "pointer" fields carried in struct
 * raw1394_request and real kernel pointers.  On 32-bit kernels the
 * value is truncated through u32 first.  Arguments are fully
 * parenthesized so an expression argument (e.g. int2ptr(a + b))
 * expands correctly — the original expansion cast only the first
 * operand.
 */
#if BITS_PER_LONG == 64
#define int2ptr(x) ((void *)(x))
#define ptr2int(x) ((u64)(x))
#else
#define int2ptr(x) ((void *)(u32)(x))
#define ptr2int(x) ((u64)(u32)(x))
#endif
/* devfs handle for /dev/raw1394, filled in by init_raw1394() (2.3+ only). */
45 static devfs_handle_t devfs_handle;
/* All known 1394 host adapters; list and host_count guarded by host_info_lock. */
47 LIST_HEAD(host_info_list);
48 static int host_count;
49 spinlock_t host_info_lock = SPIN_LOCK_UNLOCKED;
/* Handle from hpsb_register_highlevel(); used for channel (un)listen calls. */
51 static struct hpsb_highlevel *hl_handle;
/* Total bytes currently buffered for iso/FCP receive, capped at
 * iso_buffer_max so slow readers cannot pin unbounded kernel memory. */
53 static atomic_t iso_buffer_size;
54 static const int iso_buffer_max = 4 * 1024 * 1024; /* 4 MB */
/* Forward declaration: default completion routine hooked on each packet. */
56 static void queue_complete_cb(struct pending_request *req);
/*
 * Allocate and zero a pending_request.  @flags is the allocation type
 * (SLAB_KERNEL from process context, SLAB_ATOMIC from IRQ-safe paths).
 * Returns NULL on allocation failure.
 */
58 static struct pending_request *__alloc_pending_request(int flags)
60 struct pending_request *req;
62 req = (struct pending_request *)kmalloc(sizeof(struct pending_request),
63 flags);
64 if (req != NULL) {
65 memset(req, 0, sizeof(struct pending_request));
66 INIT_LIST_HEAD(&req->list);
/* Default completion routine; handle_iso_send() overrides it. */
67 req->tq.routine = (void(*)(void*))queue_complete_cb;
70 return req;
73 inline static struct pending_request *alloc_pending_request(void)
75 return __alloc_pending_request(SLAB_KERNEL);
/*
 * Release a pending_request together with whatever data it owns:
 * either a shared, refcounted iso_block_store (the last dropper frees
 * it and credits its size back to iso_buffer_size) or a private
 * kmalloc'ed buffer (free_data set).  The associated hpsb packet,
 * if any, is freed as well.
 */
78 static void free_pending_request(struct pending_request *req)
80 if (req->ibs) {
81 if (atomic_dec_and_test(&req->ibs->refcount)) {
82 atomic_sub(req->ibs->data_size, &iso_buffer_size);
83 kfree(req->ibs);
85 } else if (req->free_data) {
86 kfree(req->data);
88 free_hpsb_packet(req->packet);
89 kfree(req);
/*
 * Move @req from whatever list it is on (normally the owning file's
 * req_pending list) to that file's req_complete list, then wake any
 * blocked reader (complete_sem) and poller.  Uses irqsave locking so
 * it is callable from interrupt context.
 */
92 static void queue_complete_req(struct pending_request *req)
94 unsigned long flags;
95 struct file_info *fi = req->file_info;
97 spin_lock_irqsave(&fi->reqlists_lock, flags);
98 list_del(&req->list);
99 list_add_tail(&req->list, &fi->req_complete);
100 spin_unlock_irqrestore(&fi->reqlists_lock, flags);
102 up(&fi->complete_sem);
103 wake_up_interruptible(&fi->poll_wait_complete);
/*
 * Packet completion callback (runs from the packet's completion task
 * queue).  Translates the hardware ack code into a raw1394 error value,
 * releases the transaction label and hands the request to the reader.
 */
106 static void queue_complete_cb(struct pending_request *req)
108 struct hpsb_packet *packet = req->packet;
/* Response rcode lives in bits 12-15 of the second header quadlet. */
109 int rcode = (packet->header[1] >> 12) & 0xf;
111 switch (packet->ack_code) {
112 case ACKX_NONE:
113 case ACKX_SEND_ERROR:
114 req->req.error = RAW1394_ERROR_SEND_ERROR;
115 break;
116 case ACKX_ABORTED:
117 req->req.error = RAW1394_ERROR_ABORTED;
118 break;
119 case ACKX_TIMEOUT:
120 req->req.error = RAW1394_ERROR_TIMEOUT;
121 break;
122 default:
/* Real 1394 ack/rcode pair: pack ack in the high half, rcode low. */
123 req->req.error = (packet->ack_code << 16) | rcode;
124 break;
/* Anything other than a successfully completed split transaction
 * carries no valid response data. */
127 if (!((packet->ack_code == ACK_PENDING) && (rcode == RCODE_COMPLETE))) {
128 req->req.length = 0;
131 free_tlabel(packet->host, packet->node_id, packet->tlabel);
133 queue_complete_req(req);
/*
 * Highlevel callback: a new 1394 host adapter appeared.  Record it on
 * host_info_list under host_info_lock.  An allocation failure is
 * silently ignored — the host is then simply invisible to raw1394.
 */
137 static void add_host(struct hpsb_host *host)
139 struct host_info *hi;
141 hi = (struct host_info *)kmalloc(sizeof(struct host_info), SLAB_KERNEL);
142 if (hi != NULL) {
143 INIT_LIST_HEAD(&hi->list);
144 hi->host = host;
145 INIT_LIST_HEAD(&hi->file_info_list);
147 spin_lock_irq(&host_info_lock);
148 list_add_tail(&hi->list, &host_info_list);
149 host_count++;
150 spin_unlock_irq(&host_info_lock);
155 static struct host_info *find_host_info(struct hpsb_host *host)
157 struct list_head *lh;
158 struct host_info *hi;
160 lh = host_info_list.next;
161 while (lh != &host_info_list) {
162 hi = list_entry(lh, struct host_info, list);
163 if (hi->host == host) {
164 return hi;
166 lh = lh->next;
169 return NULL;
/*
 * Highlevel callback: host adapter going away.  Unlink and free its
 * host_info under host_info_lock; log an error if the host was never
 * registered with us.
 */
172 static void remove_host(struct hpsb_host *host)
174 struct host_info *hi;
176 spin_lock_irq(&host_info_lock);
177 hi = find_host_info(host);
179 if (hi != NULL) {
180 list_del(&hi->list);
181 host_count--;
183 spin_unlock_irq(&host_info_lock);
185 if (hi == NULL) {
186 printk(KERN_ERR "raw1394: attempt to remove unknown host "
187 "0x%p\n", host);
188 return;
191 kfree(hi);
/*
 * Highlevel callback on bus reset: queue a RAW1394_REQ_BUS_RESET event
 * to every file attached to @host.  misc packs node_id<<16|node_count,
 * plus (irm_id & NODE_MASK)<<8 for protocol version 4+ clients.
 * Allocations are SLAB_ATOMIC since this may run in interrupt context;
 * files for which allocation fails simply miss this reset event.
 */
194 static void host_reset(struct hpsb_host *host)
196 unsigned long flags;
197 struct list_head *lh;
198 struct host_info *hi;
199 struct file_info *fi;
200 struct pending_request *req;
202 spin_lock_irqsave(&host_info_lock, flags);
203 hi = find_host_info(host);
205 if (hi != NULL) {
206 lh = hi->file_info_list.next;
208 while (lh != &hi->file_info_list) {
209 fi = list_entry(lh, struct file_info, list);
210 req = __alloc_pending_request(SLAB_ATOMIC);
212 if (req != NULL) {
213 req->file_info = fi;
214 req->req.type = RAW1394_REQ_BUS_RESET;
215 req->req.generation = get_hpsb_generation();
216 req->req.misc = (host->node_id << 16)
217 | host->node_count;
218 if (fi->protocol_version > 3) {
219 req->req.misc |= ((host->irm_id
220 & NODE_MASK) << 8);
223 queue_complete_req(req);
226 lh = lh->next;
229 spin_unlock_irqrestore(&host_info_lock, flags);
/*
 * Highlevel callback: an isochronous packet arrived on @channel.
 * The payload is copied once into a refcounted iso_block_store shared
 * by every file listening on that channel; one pending_request per
 * listener is built under host_info_lock and collected on a local list,
 * then completed after the lock is dropped (queue_complete_req takes
 * other locks and wakes sleepers).  The packet is dropped outright if
 * buffering it would push iso_buffer_size past iso_buffer_max.
 */
232 static void iso_receive(struct hpsb_host *host, int channel, quadlet_t *data,
233 unsigned int length)
235 unsigned long flags;
236 struct list_head *lh;
237 struct host_info *hi;
238 struct file_info *fi;
239 struct pending_request *req;
240 struct iso_block_store *ibs = NULL;
241 LIST_HEAD(reqs);
243 if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
244 HPSB_INFO("dropped iso packet");
245 return;
248 spin_lock_irqsave(&host_info_lock, flags);
249 hi = find_host_info(host);
251 if (hi != NULL) {
252 for (lh = hi->file_info_list.next; lh != &hi->file_info_list;
253 lh = lh->next) {
254 fi = list_entry(lh, struct file_info, list);
/* listen_channels is a 64-bit channel bitmask per file. */
256 if (!(fi->listen_channels & (1ULL << channel))) {
257 continue;
260 req = __alloc_pending_request(SLAB_ATOMIC);
261 if (!req) break;
/* First interested listener triggers the single payload copy. */
263 if (!ibs) {
264 ibs = kmalloc(sizeof(struct iso_block_store)
265 + length, SLAB_ATOMIC);
266 if (!ibs) {
267 kfree(req);
268 break;
271 atomic_add(length, &iso_buffer_size);
272 atomic_set(&ibs->refcount, 0);
273 ibs->data_size = length;
274 memcpy(ibs->data, data, length);
277 atomic_inc(&ibs->refcount);
279 req->file_info = fi;
280 req->ibs = ibs;
281 req->data = ibs->data;
282 req->req.type = RAW1394_REQ_ISO_RECEIVE;
283 req->req.generation = get_hpsb_generation();
284 req->req.misc = 0;
285 req->req.recvb = ptr2int(fi->iso_buffer);
/* Never announce more data than the reader's buffer can take. */
286 req->req.length = MIN(length, fi->iso_buffer_length);
288 list_add_tail(&req->list, &reqs);
291 spin_unlock_irqrestore(&host_info_lock, flags);
293 lh = reqs.next;
294 while (lh != &reqs) {
295 req = list_entry(lh, struct pending_request, list);
296 lh = lh->next;
297 queue_complete_req(req);
/*
 * Highlevel callback: an FCP command/response frame arrived.  Mirrors
 * iso_receive(): the payload is copied once into a shared refcounted
 * iso_block_store, one request per file with a registered fcp_buffer is
 * queued under host_info_lock and completed afterwards outside it.
 * misc carries nodeid | direction<<16.  Dropped if total buffered data
 * would exceed iso_buffer_max.
 */
301 static void fcp_request(struct hpsb_host *host, int nodeid, int direction,
302 int cts, u8 *data, unsigned int length)
304 unsigned long flags;
305 struct list_head *lh;
306 struct host_info *hi;
307 struct file_info *fi;
308 struct pending_request *req;
309 struct iso_block_store *ibs = NULL;
310 LIST_HEAD(reqs);
312 if ((atomic_read(&iso_buffer_size) + length) > iso_buffer_max) {
313 HPSB_INFO("dropped fcp request");
314 return;
317 spin_lock_irqsave(&host_info_lock, flags);
318 hi = find_host_info(host);
320 if (hi != NULL) {
321 for (lh = hi->file_info_list.next; lh != &hi->file_info_list;
322 lh = lh->next) {
323 fi = list_entry(lh, struct file_info, list);
/* Only files that registered an FCP buffer get a copy. */
325 if (!fi->fcp_buffer) {
326 continue;
329 req = __alloc_pending_request(SLAB_ATOMIC);
330 if (!req) break;
/* First interested file triggers the single payload copy. */
332 if (!ibs) {
333 ibs = kmalloc(sizeof(struct iso_block_store)
334 + length, SLAB_ATOMIC);
335 if (!ibs) {
336 kfree(req);
337 break;
340 atomic_add(length, &iso_buffer_size);
341 atomic_set(&ibs->refcount, 0);
342 ibs->data_size = length;
343 memcpy(ibs->data, data, length);
346 atomic_inc(&ibs->refcount);
348 req->file_info = fi;
349 req->ibs = ibs;
350 req->data = ibs->data;
351 req->req.type = RAW1394_REQ_FCP_REQUEST;
352 req->req.generation = get_hpsb_generation();
353 req->req.misc = nodeid | (direction << 16);
354 req->req.recvb = ptr2int(fi->fcp_buffer);
355 req->req.length = length;
357 list_add_tail(&req->list, &reqs);
360 spin_unlock_irqrestore(&host_info_lock, flags);
362 lh = reqs.next;
363 while (lh != &reqs) {
364 req = list_entry(lh, struct pending_request, list);
365 lh = lh->next;
366 queue_complete_req(req);
/*
 * read() on /dev/raw1394: hand the oldest completed request back to
 * user space.  Exactly sizeof(struct raw1394_request) must be read.
 * Blocks on complete_sem until a request completes, unless O_NONBLOCK.
 */
371 static ssize_t dev_read(struct file *file, char *buffer, size_t count,
372 loff_t *offset_is_ignored)
374 struct file_info *fi = (struct file_info *)file->private_data;
375 struct list_head *lh;
376 struct pending_request *req;
378 if (count != sizeof(struct raw1394_request)) {
379 return -EINVAL;
/* Validate the destination once; __copy_to_user below relies on it. */
382 if (!access_ok(VERIFY_WRITE, buffer, count)) {
383 return -EFAULT;
386 if (file->f_flags & O_NONBLOCK) {
387 if (down_trylock(&fi->complete_sem)) {
388 return -EAGAIN;
390 } else {
391 if (down_interruptible(&fi->complete_sem)) {
392 return -ERESTARTSYS;
/* The semaphore guarantees req_complete is non-empty here. */
396 spin_lock_irq(&fi->reqlists_lock);
397 lh = fi->req_complete.next;
398 list_del(lh);
399 spin_unlock_irq(&fi->reqlists_lock);
401 req = list_entry(lh, struct pending_request, list);
/* Payload goes to the user's receive buffer recorded in the request. */
403 if (req->req.length) {
404 if (copy_to_user(int2ptr(req->req.recvb), req->data,
405 req->req.length)) {
406 req->req.error = RAW1394_ERROR_MEMFAULT;
/* NOTE(review): the __copy_to_user return value is ignored; a fault
 * here would silently lose the request header. */
409 __copy_to_user(buffer, &req->req, sizeof(req->req));
411 free_pending_request(req);
412 return sizeof(struct raw1394_request);
/*
 * State handler for a freshly opened fd: the first request must be
 * RAW1394_REQ_INITIALIZE carrying the client's API version in misc.
 * The current version or the compatible version 3 moves the fd to
 * 'initialized'; anything else answers with ERROR_COMPAT and our
 * version, and a non-INITIALIZE request is a state-order error.
 */
416 static int state_opened(struct file_info *fi, struct pending_request *req)
418 if (req->req.type == RAW1394_REQ_INITIALIZE) {
419 switch (req->req.misc) {
420 case RAW1394_KERNELAPI_VERSION:
421 case 3:
422 fi->state = initialized;
423 fi->protocol_version = req->req.misc;
424 req->req.error = RAW1394_ERROR_NONE;
425 req->req.generation = get_hpsb_generation();
426 break;
428 default:
429 req->req.error = RAW1394_ERROR_COMPAT;
430 req->req.misc = RAW1394_KERNELAPI_VERSION;
432 } else {
433 req->req.error = RAW1394_ERROR_STATE_ORDER;
436 req->req.length = 0;
437 queue_complete_req(req);
438 return sizeof(struct raw1394_request);
/*
 * State handler after INITIALIZE: only LIST_CARDS and SET_CARD are
 * valid.  LIST_CARDS returns an array of raw1394_khost_list as reply
 * data (count in misc); SET_CARD binds the fd to the misc-th host and
 * moves it to 'connected'.  A request carrying a stale bus generation
 * is bounced with ERROR_GENERATION and the current generation.
 */
441 static int state_initialized(struct file_info *fi, struct pending_request *req)
443 struct list_head *lh;
444 struct host_info *hi;
445 struct raw1394_khost_list *khl;
447 if (req->req.generation != get_hpsb_generation()) {
448 req->req.error = RAW1394_ERROR_GENERATION;
449 req->req.generation = get_hpsb_generation();
450 req->req.length = 0;
451 queue_complete_req(req);
452 return sizeof(struct raw1394_request);
455 switch (req->req.type) {
456 case RAW1394_REQ_LIST_CARDS:
457 spin_lock_irq(&host_info_lock);
/* Atomic allocation: we hold a spinlock. */
458 khl = kmalloc(sizeof(struct raw1394_khost_list) * host_count,
459 SLAB_ATOMIC);
461 if (khl != NULL) {
462 req->req.misc = host_count;
463 req->data = (quadlet_t *)khl;
465 lh = host_info_list.next;
466 while (lh != &host_info_list) {
467 hi = list_entry(lh, struct host_info, list);
469 khl->nodes = hi->host->node_count;
470 strcpy(khl->name, hi->host->template->name);
472 khl++;
473 lh = lh->next;
476 spin_unlock_irq(&host_info_lock);
478 if (khl != NULL) {
479 req->req.error = RAW1394_ERROR_NONE;
/* Reply is clamped to what the caller's buffer can hold. */
480 req->req.length = MIN(req->req.length,
481 sizeof(struct raw1394_khost_list)
482 * req->req.misc);
/* dev_read()'s free_pending_request() will kfree khl via req->data. */
483 req->free_data = 1;
484 } else {
485 return -ENOMEM;
487 break;
489 case RAW1394_REQ_SET_CARD:
/* lh doubles as the success flag for the check below. */
490 lh = NULL;
492 spin_lock_irq(&host_info_lock);
493 if (req->req.misc < host_count) {
/* Walk forward misc entries to the chosen card. */
494 lh = host_info_list.next;
495 while (req->req.misc--) {
496 lh = lh->next;
498 hi = list_entry(lh, struct host_info, list);
/* Pin the host module and attach this fd to the host's file list. */
499 hpsb_inc_host_usage(hi->host);
500 list_add_tail(&fi->list, &hi->file_info_list);
501 fi->host = hi->host;
502 fi->state = connected;
504 spin_unlock_irq(&host_info_lock);
506 if (lh != NULL) {
507 req->req.error = RAW1394_ERROR_NONE;
508 req->req.misc = (fi->host->node_id << 16)
509 | fi->host->node_count;
510 if (fi->protocol_version > 3) {
511 req->req.misc |=
512 (fi->host->irm_id & NODE_MASK) << 8;
514 } else {
515 req->req.error = RAW1394_ERROR_INVALID_ARG;
518 req->req.length = 0;
519 break;
521 default:
522 req->req.error = RAW1394_ERROR_STATE_ORDER;
523 req->req.length = 0;
524 break;
527 queue_complete_req(req);
528 return sizeof(struct raw1394_request);
/*
 * RAW1394_REQ_ISO_LISTEN: misc >= 0 starts listening on that channel
 * and records the caller's receive buffer; misc < 0 is the one's
 * complement of a channel to stop listening on.  Valid range is
 * -64..63; listening twice (or unlistening an idle channel) yields
 * ERROR_ALREADY / ERROR_INVALID_ARG.  The request always completes
 * synchronously.
 */
531 static void handle_iso_listen(struct file_info *fi, struct pending_request *req)
533 int channel = req->req.misc;
535 spin_lock(&host_info_lock);
536 if ((channel > 63) || (channel < -64)) {
537 req->req.error = RAW1394_ERROR_INVALID_ARG;
538 } else if (channel >= 0) {
539 /* allocate channel req.misc */
540 if (fi->listen_channels & (1ULL << channel)) {
541 req->req.error = RAW1394_ERROR_ALREADY;
542 } else {
543 fi->listen_channels |= 1ULL << channel;
544 hpsb_listen_channel(hl_handle, fi->host, channel);
/* Remember where iso_receive() should deliver payloads. */
545 fi->iso_buffer = int2ptr(req->req.recvb);
546 fi->iso_buffer_length = req->req.length;
548 } else {
549 /* deallocate channel (one's complement neg) req.misc */
550 channel = ~channel;
552 if (fi->listen_channels & (1ULL << channel)) {
553 hpsb_unlisten_channel(hl_handle, fi->host, channel);
554 fi->listen_channels &= ~(1ULL << channel);
555 } else {
556 req->req.error = RAW1394_ERROR_INVALID_ARG;
560 req->req.length = 0;
561 queue_complete_req(req);
562 spin_unlock(&host_info_lock);
/*
 * RAW1394_REQ_FCP_LISTEN: misc != 0 registers the user buffer at
 * req.recvb to receive FCP frames (see fcp_request()); misc == 0
 * unregisters it.  Registering twice, or unregistering when nothing
 * is registered, answers ERROR_ALREADY.  Completes synchronously.
 */
565 static void handle_fcp_listen(struct file_info *fi, struct pending_request *req)
567 if (req->req.misc) {
568 if (fi->fcp_buffer) {
569 req->req.error = RAW1394_ERROR_ALREADY;
570 } else {
571 fi->fcp_buffer = (u8 *)int2ptr(req->req.recvb);
573 } else {
574 if (!fi->fcp_buffer) {
575 req->req.error = RAW1394_ERROR_ALREADY;
576 } else {
577 fi->fcp_buffer = NULL;
581 req->req.length = 0;
582 queue_complete_req(req);
/*
 * Execute a read/write/lock request addressed to the local host via
 * the highlevel_* helpers — nothing goes on the wire.  The result
 * completes immediately; bit 20 is OR'ed into the error code to mark
 * it as a locally generated response code.
 * NOTE(review): req->req.length comes from user space and is handed to
 * kmalloc unchecked — confirm an upper bound is enforced by the caller.
 */
585 static int handle_local_request(struct file_info *fi,
586 struct pending_request *req, int node)
/* 48-bit CSR address; the upper 16 bits were the node id. */
588 u64 addr = req->req.address & 0xffffffffffffULL;
590 req->data = kmalloc(req->req.length, SLAB_KERNEL);
591 if (!req->data) return -ENOMEM;
592 req->free_data = 1;
594 switch (req->req.type) {
595 case RAW1394_REQ_ASYNC_READ:
596 req->req.error = highlevel_read(fi->host, node, req->data, addr,
597 req->req.length);
598 break;
600 case RAW1394_REQ_ASYNC_WRITE:
601 if (copy_from_user(req->data, int2ptr(req->req.sendb),
602 req->req.length)) {
603 req->req.error = RAW1394_ERROR_MEMFAULT;
604 break;
607 req->req.error = highlevel_write(fi->host, node, req->data,
608 addr, req->req.length);
609 req->req.length = 0;
610 break;
612 case RAW1394_REQ_LOCK:
/* FETCH_ADD/LITTLE_ADD take a single quadlet; every other extcode
 * takes arg + data, i.e. 8 bytes. */
613 if ((req->req.misc == EXTCODE_FETCH_ADD)
614 || (req->req.misc == EXTCODE_LITTLE_ADD)) {
615 if (req->req.length != 4) {
616 req->req.error = RAW1394_ERROR_INVALID_ARG;
617 break;
619 } else {
620 if (req->req.length != 8) {
621 req->req.error = RAW1394_ERROR_INVALID_ARG;
622 break;
626 if (copy_from_user(req->data, int2ptr(req->req.sendb),
627 req->req.length)) {
628 req->req.error = RAW1394_ERROR_MEMFAULT;
629 break;
632 if (req->req.length == 8) {
633 req->req.error = highlevel_lock(fi->host, node,
634 req->data, addr,
635 req->data[1],
636 req->data[0],
637 req->req.misc);
/* Lock responses always return one quadlet (the old value). */
638 req->req.length = 4;
639 } else {
640 req->req.error = highlevel_lock(fi->host, node,
641 req->data, addr,
642 req->data[0], 0,
643 req->req.misc);
645 break;
647 case RAW1394_REQ_LOCK64:
648 default:
649 req->req.error = RAW1394_ERROR_STATE_ORDER;
652 if (req->req.error) req->req.length = 0;
/* Mark the error code as a local (software-generated) response. */
653 req->req.error |= 0x00100000;
654 queue_complete_req(req);
655 return sizeof(struct raw1394_request);
/*
 * Build and send an asynchronous packet for a request addressed to a
 * remote node.  On success the request is parked on req_pending and
 * completion is driven by queue_complete_cb() through the packet's
 * completion task queue; on send failure the transaction label is
 * released and the request completes immediately with SEND_ERROR.
 * NOTE(review): in this listing the quadlet-write MEMFAULT path has no
 * visible break, and hpsb_make_writeqpacket() appears to be missing its
 * data argument — lines were likely lost in extraction; verify against
 * the original tree before relying on this text.
 */
658 static int handle_remote_request(struct file_info *fi,
659 struct pending_request *req, int node)
661 struct hpsb_packet *packet = NULL;
/* 48-bit CSR address; upper 16 bits of req.address were the node id. */
662 u64 addr = req->req.address & 0xffffffffffffULL;
664 switch (req->req.type) {
665 case RAW1394_REQ_ASYNC_READ:
666 if (req->req.length == 4) {
667 packet = hpsb_make_readqpacket(fi->host, node, addr);
668 if (!packet) return -ENOMEM;
/* Quadlet responses come back inside the header itself. */
670 req->data = &packet->header[3];
671 } else {
672 packet = hpsb_make_readbpacket(fi->host, node, addr,
673 req->req.length);
674 if (!packet) return -ENOMEM;
676 req->data = packet->data;
678 break;
680 case RAW1394_REQ_ASYNC_WRITE:
681 if (req->req.length == 4) {
682 quadlet_t x;
684 if (copy_from_user(&x, int2ptr(req->req.sendb), 4)) {
685 req->req.error = RAW1394_ERROR_MEMFAULT;
688 packet = hpsb_make_writeqpacket(fi->host, node, addr,
690 if (!packet) return -ENOMEM;
691 } else {
692 packet = hpsb_make_writebpacket(fi->host, node, addr,
693 req->req.length);
694 if (!packet) return -ENOMEM;
696 if (copy_from_user(packet->data, int2ptr(req->req.sendb),
697 req->req.length)) {
698 req->req.error = RAW1394_ERROR_MEMFAULT;
/* Writes carry no response payload. */
701 req->req.length = 0;
702 break;
704 case RAW1394_REQ_LOCK:
/* FETCH_ADD/LITTLE_ADD take one quadlet; other extcodes take 8 bytes. */
705 if ((req->req.misc == EXTCODE_FETCH_ADD)
706 || (req->req.misc == EXTCODE_LITTLE_ADD)) {
707 if (req->req.length != 4) {
708 req->req.error = RAW1394_ERROR_INVALID_ARG;
709 break;
711 } else {
712 if (req->req.length != 8) {
713 req->req.error = RAW1394_ERROR_INVALID_ARG;
714 break;
718 packet = hpsb_make_lockpacket(fi->host, node, addr,
719 req->req.misc);
720 if (!packet) return -ENOMEM;
722 if (copy_from_user(packet->data, int2ptr(req->req.sendb),
723 req->req.length)) {
724 req->req.error = RAW1394_ERROR_MEMFAULT;
725 break;
728 req->data = packet->data;
/* Lock responses always return one quadlet (the old value). */
729 req->req.length = 4;
730 break;
732 case RAW1394_REQ_LOCK64:
733 default:
734 req->req.error = RAW1394_ERROR_STATE_ORDER;
737 req->packet = packet;
739 if (req->req.error) {
740 req->req.length = 0;
741 queue_complete_req(req);
742 return sizeof(struct raw1394_request);
/* Hook queue_complete_cb (set at allocation) onto packet completion. */
745 req->tq.data = req;
746 queue_task(&req->tq, &packet->complete_tq);
748 spin_lock_irq(&fi->reqlists_lock);
749 list_add_tail(&req->list, &fi->req_pending);
750 spin_unlock_irq(&fi->reqlists_lock);
752 if (!hpsb_send_packet(packet)) {
753 req->req.error = RAW1394_ERROR_SEND_ERROR;
754 req->req.length = 0;
755 free_tlabel(packet->host, packet->node_id, packet->tlabel);
756 queue_complete_req(req);
758 return sizeof(struct raw1394_request);
/*
 * RAW1394_REQ_ISO_SEND: build an isochronous packet on @channel
 * (low 6 bits), with the tag (misc bits 16-17) and sync (misc bits
 * 0-3) fields, speed code from the low bits of address, copy the
 * payload from user space and send it.  Completion bypasses the ack
 * translation: queue_complete_req() is hooked directly as the
 * task-queue routine.
 */
761 static int handle_iso_send(struct file_info *fi, struct pending_request *req,
762 int channel)
764 struct hpsb_packet *packet;
766 packet = alloc_hpsb_packet(req->req.length);
767 if (!packet) return -ENOMEM;
768 req->packet = packet;
770 fill_iso_packet(packet, req->req.length, channel & 0x3f,
771 (req->req.misc >> 16) & 0x3, req->req.misc & 0xf);
772 packet->type = iso;
773 packet->speed_code = req->req.address & 0x3;
774 packet->host = fi->host;
776 if (copy_from_user(packet->data, int2ptr(req->req.sendb),
777 req->req.length)) {
778 req->req.error = RAW1394_ERROR_MEMFAULT;
779 req->req.length = 0;
780 queue_complete_req(req);
781 return sizeof(struct raw1394_request);
/* Override the default queue_complete_cb set at allocation time:
 * iso packets get no ack/rcode to translate. */
784 req->tq.data = req;
785 req->tq.routine = (void (*)(void*))queue_complete_req;
786 req->req.length = 0;
787 queue_task(&req->tq, &packet->complete_tq);
789 spin_lock_irq(&fi->reqlists_lock);
790 list_add_tail(&req->list, &fi->req_pending);
791 spin_unlock_irq(&fi->reqlists_lock);
793 if (!hpsb_send_packet(packet)) {
794 req->req.error = RAW1394_ERROR_SEND_ERROR;
795 queue_complete_req(req);
798 return sizeof(struct raw1394_request);
/*
 * State handler for a fd bound to a host.  Iso sends skip the bus
 * generation check; listen/reset requests complete synchronously;
 * read/write/lock requests are routed to the local or remote handler
 * depending on the destination node id (top 16 bits of address).
 */
801 static int state_connected(struct file_info *fi, struct pending_request *req)
803 int node = req->req.address >> 48;
805 req->req.error = RAW1394_ERROR_NONE;
807 if (req->req.type == RAW1394_REQ_ISO_SEND) {
808 return handle_iso_send(fi, req, node);
/* Reject transactions built against a stale bus topology. */
811 if (req->req.generation != get_hpsb_generation()) {
812 req->req.error = RAW1394_ERROR_GENERATION;
813 req->req.generation = get_hpsb_generation();
814 req->req.length = 0;
815 queue_complete_req(req);
816 return sizeof(struct raw1394_request);
819 switch (req->req.type) {
820 case RAW1394_REQ_ISO_LISTEN:
821 handle_iso_listen(fi, req);
822 return sizeof(struct raw1394_request);
824 case RAW1394_REQ_FCP_LISTEN:
825 handle_fcp_listen(fi, req);
826 return sizeof(struct raw1394_request);
828 case RAW1394_REQ_RESET_BUS:
829 hpsb_reset_bus(fi->host);
830 return sizeof(struct raw1394_request);
/* All remaining request types are data transfers: need a length. */
833 if (req->req.length == 0) {
834 req->req.error = RAW1394_ERROR_INVALID_ARG;
835 queue_complete_req(req);
836 return sizeof(struct raw1394_request);
839 if (fi->host->node_id == node) {
840 return handle_local_request(fi, req, node);
843 return handle_remote_request(fi, req, node);
/*
 * write() on /dev/raw1394: accept exactly one struct raw1394_request
 * and dispatch it through the fd's state machine
 * (opened -> initialized -> connected).  Returns the request size on
 * success; on a negative return the request is freed here, otherwise
 * ownership has passed to the completion path.
 */
847 static ssize_t dev_write(struct file *file, const char *buffer, size_t count,
848 loff_t *offset_is_ignored)
850 struct file_info *fi = (struct file_info *)file->private_data;
851 struct pending_request *req;
852 ssize_t retval = 0;
854 if (count != sizeof(struct raw1394_request)) {
855 return -EINVAL;
858 req = alloc_pending_request();
859 if (req == NULL) {
860 return -ENOMEM;
862 req->file_info = fi;
864 if (copy_from_user(&req->req, buffer, sizeof(struct raw1394_request))) {
865 free_pending_request(req);
866 return -EFAULT;
869 switch (fi->state) {
870 case opened:
871 retval = state_opened(fi, req);
872 break;
874 case initialized:
875 retval = state_initialized(fi, req);
876 break;
878 case connected:
879 retval = state_connected(fi, req);
880 break;
883 if (retval < 0) {
884 free_pending_request(req);
887 return retval;
/*
 * poll(): writing a new request is always possible; the fd is readable
 * whenever at least one completed request is queued.
 */
890 static unsigned int dev_poll(struct file *file, poll_table *pt)
892 struct file_info *fi = file->private_data;
893 unsigned int mask = POLLOUT | POLLWRNORM;
895 poll_wait(file, &fi->poll_wait_complete, pt);
897 spin_lock_irq(&fi->reqlists_lock);
898 if (!list_empty(&fi->req_complete)) {
899 mask |= POLLIN | POLLRDNORM;
901 spin_unlock_irq(&fi->reqlists_lock);
903 return mask;
/*
 * open(): only minor 0 exists.  Allocate a zeroed per-fd file_info in
 * state 'opened'.  complete_sem counts completed requests and starts
 * at zero; the module use count is balanced in dev_release().
 */
906 static int dev_open(struct inode *inode, struct file *file)
908 struct file_info *fi;
910 if (MINOR(inode->i_rdev)) {
911 return -ENXIO;
914 V22_COMPAT_MOD_INC_USE_COUNT;
916 fi = kmalloc(sizeof(struct file_info), SLAB_KERNEL);
917 if (fi == NULL) {
918 V22_COMPAT_MOD_DEC_USE_COUNT;
919 return -ENOMEM;
922 memset(fi, 0, sizeof(struct file_info));
924 INIT_LIST_HEAD(&fi->list);
925 fi->state = opened;
926 INIT_LIST_HEAD(&fi->req_pending);
927 INIT_LIST_HEAD(&fi->req_complete);
928 sema_init(&fi->complete_sem, 0);
929 spin_lock_init(&fi->reqlists_lock);
930 init_waitqueue_head(&fi->poll_wait_complete);
932 file->private_data = fi;
934 return 0;
/*
 * release(): stop all channel listens, free every completed request
 * and wait until nothing is pending any more, then detach from the
 * bound host and free the per-fd state.
 * NOTE(review): the down_interruptible() return value is ignored, so
 * a pending signal degrades the wait into a spin until outstanding
 * requests complete — confirm intended.
 */
937 static int dev_release(struct inode *inode, struct file *file)
939 struct file_info *fi = file->private_data;
940 struct list_head *lh;
941 struct pending_request *req;
942 int done = 0, i;
944 lock_kernel();
945 for (i = 0; i < 64; i++) {
946 if (fi->listen_channels & (1ULL << i)) {
947 hpsb_unlisten_channel(hl_handle, fi->host, i);
951 spin_lock(&host_info_lock);
952 fi->listen_channels = 0;
953 spin_unlock(&host_info_lock);
/* Drain req_complete and sleep until req_pending empties out;
 * completions up() the semaphore and re-wake us. */
955 while (!done) {
956 spin_lock_irq(&fi->reqlists_lock);
958 while (!list_empty(&fi->req_complete)) {
959 lh = fi->req_complete.next;
960 list_del(lh);
962 req = list_entry(lh, struct pending_request, list);
964 free_pending_request(req);
967 if (list_empty(&fi->req_pending)) {
968 done = 1;
971 spin_unlock_irq(&fi->reqlists_lock);
973 if (!done) {
974 down_interruptible(&fi->complete_sem);
/* Undo the SET_CARD binding: unlink from the host's file list and
 * drop the host usage count taken in state_initialized(). */
978 if (fi->state == connected) {
979 spin_lock_irq(&host_info_lock);
980 list_del(&fi->list);
981 spin_unlock_irq(&host_info_lock);
983 hpsb_dec_host_usage(fi->host);
986 kfree(fi);
988 V22_COMPAT_MOD_DEC_USE_COUNT;
989 unlock_kernel();
990 return 0;
/* Callbacks registered with the ieee1394 highlevel layer. */
993 static struct hpsb_highlevel_ops hl_ops = {
994 add_host: add_host,
995 remove_host: remove_host,
996 host_reset: host_reset,
997 iso_receive: iso_receive,
998 fcp_request: fcp_request,
/* Character-device entry points for /dev/raw1394. */
1001 static struct file_operations file_ops = {
1002 OWNER_THIS_MODULE
1003 read: dev_read,
1004 write: dev_write,
1005 poll: dev_poll,
1006 open: dev_open,
1007 release: dev_release,
1010 int init_raw1394(void)
1012 hl_handle = hpsb_register_highlevel(RAW1394_DEVICE_NAME, &hl_ops);
1013 if (hl_handle == NULL) {
1014 HPSB_ERR("raw1394 failed to register with ieee1394 highlevel");
1015 return -ENOMEM;
1018 devfs_handle = devfs_register(NULL, RAW1394_DEVICE_NAME, DEVFS_FL_NONE,
1019 RAW1394_DEVICE_MAJOR, 0,
1020 S_IFCHR | S_IRUSR | S_IWUSR, &file_ops,
1021 NULL);
1023 if (devfs_register_chrdev(RAW1394_DEVICE_MAJOR, RAW1394_DEVICE_NAME,
1024 &file_ops)) {
1025 HPSB_ERR("raw1394 failed to register /dev/raw1394 device");
1026 return -EBUSY;
1028 printk(KERN_INFO "raw1394: /dev/%s device initialized\n", RAW1394_DEVICE_NAME);
1029 return 0;
/*
 * Module teardown: drop the chrdev and devfs registrations, then
 * detach from the ieee1394 highlevel layer — reverse of init order.
 */
1032 void cleanup_raw1394(void)
1034 devfs_unregister_chrdev(RAW1394_DEVICE_MAJOR, RAW1394_DEVICE_NAME);
1035 devfs_unregister(devfs_handle);
1036 hpsb_unregister_highlevel(hl_handle);
#ifdef MODULE

/* Module entry point: delegate to the shared initializer. */
int init_module(void)
{
	return init_raw1394();
}

/* Module exit point.  Fix: the original wrote "return cleanup_raw1394();",
 * returning an expression from a void function — a C constraint
 * violation (C99 6.8.6.4). */
void cleanup_module(void)
{
	cleanup_raw1394();
}

#endif