/*
 * Core maple bus functionality
 *
 *  Copyright (C) 2007 - 2009 Adrian McMenamin
 *  Copyright (C) 2001 - 2008 Paul Mundt
 *  Copyright (C) 2000 - 2001 YAEGASHI Takeshi
 *  Copyright (C) 2001 M. R. Brown
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/maple.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <mach/dma.h>
#include <mach/sysasic.h>
MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>");
MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
MODULE_LICENSE("GPL v2");
MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");
static void maple_dma_handler(struct work_struct *work);
static void maple_vblank_handler(struct work_struct *work);

static DECLARE_WORK(maple_dma_process, maple_dma_handler);
static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);

static LIST_HEAD(maple_waitq);
static LIST_HEAD(maple_sentq);

/* mutex to protect queue of waiting packets */
static DEFINE_MUTEX(maple_wlist_lock);
static struct maple_driver maple_unsupported_device;
static struct device maple_bus;
static int subdevice_map[MAPLE_PORTS];
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
static unsigned long maple_pnp_time;
static int started, scanning, fullscan;
static struct kmem_cache *maple_queue_cache;

struct maple_device_specify {
	int port;
	int unit;
};

static bool checked[MAPLE_PORTS];
static bool empty[MAPLE_PORTS];
static struct maple_device *baseunits[MAPLE_PORTS];
/**
 * maple_driver_register - register a maple driver
 * @drv: maple driver to be registered.
 *
 * Registers the passed in @drv, while updating the bus type.
 * Devices with matching function IDs will be automatically probed.
 */
int maple_driver_register(struct maple_driver *drv)
{
	if (!drv)
		return -EINVAL;

	drv->drv.bus = &maple_bus_type;

	return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_register);
/**
 * maple_driver_unregister - unregister a maple driver.
 * @drv: maple driver to unregister.
 *
 * Cleans up after maple_driver_register(). To be invoked in the exit
 * path of any module drivers.
 */
void maple_driver_unregister(struct maple_driver *drv)
{
	driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_unregister);
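
/*
 * Example (illustrative only, not part of this driver): a client module
 * pairing maple_driver_register() with maple_driver_unregister(). The
 * driver name, function code and probe hook below are assumptions for
 * the sketch rather than definitions made in this file.
 *
 *	static int example_probe(struct device *dev);
 *
 *	static struct maple_driver example_maple_driver = {
 *		.function = MAPLE_FUNC_CONTROLLER,
 *		.drv = {
 *			.name	= "example_maple_client",
 *			.probe	= example_probe,
 *		},
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return maple_driver_register(&example_maple_driver);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		maple_driver_unregister(&example_maple_driver);
 *	}
 *
 *	module_init(example_init);
 *	module_exit(example_exit);
 */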
/* set hardware registers to enable next round of dma */
static void maple_dma_reset(void)
{
	__raw_writel(MAPLE_MAGIC, MAPLE_RESET);
	/* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
	__raw_writel(1, MAPLE_TRIGTYPE);
	/*
	 * Maple system register
	 * bits 31 - 16	timeout in units of 20nsec
	 * bit 12	hard trigger - set 0 to keep responding to VBLANK
	 * bits 9 - 8	set 00 for 2 Mbps, 01 for 1 Mbps
	 * bits 3 - 0	delay (in 1.3ms) between VBLANK and start of DMA
	 * max delay is 11
	 */
	__raw_writel(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED);
	__raw_writel(virt_to_phys(maple_sendbuf), MAPLE_DMAADDR);
	__raw_writel(1, MAPLE_ENABLE);
}
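
/*
 * For reference, decoding the value written to MAPLE_SPEED above:
 * MAPLE_TIMEOUT(0xFFFF) loads the maximum timeout into bits 31 - 16,
 * i.e. 0xFFFF * 20 ns, roughly 1.3 ms, while MAPLE_2MBPS requests full
 * bus speed (assuming the usual mach-dreamcast register definitions,
 * it leaves bits 9 - 8 as 00).
 */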
/**
 * maple_getcond_callback - setup handling MAPLE_COMMAND_GETCOND
 * @dev: device responding
 * @callback: handler callback
 * @interval: interval in jiffies between callbacks
 * @function: the function code for the device
 */
void maple_getcond_callback(struct maple_device *dev,
			void (*callback) (struct mapleq *mq),
			unsigned long interval, unsigned long function)
{
	dev->callback = callback;
	dev->interval = interval;
	dev->function = cpu_to_be32(function);
	dev->when = jiffies;
}
EXPORT_SYMBOL_GPL(maple_getcond_callback);
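
/*
 * Example (illustrative only): a client asking the bus to poll its
 * device with MAPLE_COMMAND_GETCOND every fifth of a second. The
 * callback name and function code are assumptions for the sketch.
 *
 *	static void example_getcond(struct mapleq *mq)
 *	{
 *		// mq->recvbuf->buf holds the raw GETCOND response
 *	}
 *
 *	maple_getcond_callback(mdev, example_getcond, HZ / 5,
 *			       MAPLE_FUNC_CONTROLLER);
 */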
static int maple_dma_done(void)
{
	return (__raw_readl(MAPLE_STATE) & 1) == 0;
}
static void maple_release_device(struct device *dev)
{
	struct maple_device *mdev;
	struct mapleq *mq;

	mdev = to_maple_dev(dev);
	mq = mdev->mq;
	kmem_cache_free(maple_queue_cache, mq->recvbuf);
	kfree(mq);
	kfree(mdev);
}
/**
 * maple_add_packet - add a single instruction to the maple bus queue
 * @mdev: maple device
 * @function: function on device being queried
 * @command: maple command to add
 * @length: length of command string (in 32 bit words)
 * @data: remainder of command string
 */
int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,
	size_t length, void *data)
{
	int ret = 0;
	void *sendbuf = NULL;

	if (length) {
		sendbuf = kzalloc(length * 4, GFP_KERNEL);
		if (!sendbuf) {
			ret = -ENOMEM;
			goto out;
		}
		((__be32 *)sendbuf)[0] = cpu_to_be32(function);
	}

	mdev->mq->command = command;
	mdev->mq->length = length;
	if (length > 1)
		memcpy(sendbuf + 4, data, (length - 1) * 4);
	mdev->mq->sendbuf = sendbuf;

	mutex_lock(&maple_wlist_lock);
	list_add_tail(&mdev->mq->list, &maple_waitq);
	mutex_unlock(&maple_wlist_lock);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(maple_add_packet);
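
/*
 * Example (illustrative only): queueing a single GETCOND query, much as
 * setup_maple_commands() below does. A length of 1 sends only the
 * function word; the reply arrives via the device's callback once the
 * DMA round completes.
 *
 *	int ret;
 *
 *	ret = maple_add_packet(mdev, be32_to_cpu(mdev->devinfo.function),
 *			       MAPLE_COMMAND_GETCOND, 1, NULL);
 *	if (ret)
 *		;	// could not queue - retry on a later pass
 */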
static struct mapleq *maple_allocq(struct maple_device *mdev)
{
	struct mapleq *mq;

	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		goto failed_nomem;

	INIT_LIST_HEAD(&mq->list);
	mq->dev = mdev;
	mq->recvbuf = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
	if (!mq->recvbuf)
		goto failed_p2;
	mq->recvbuf->buf = &((mq->recvbuf->bufx)[0]);

	return mq;

failed_p2:
	kfree(mq);
failed_nomem:
	dev_err(&mdev->dev, "could not allocate memory for device (%d, %d)\n",
		mdev->port, mdev->unit);
	return NULL;
}
static struct maple_device *maple_alloc_dev(int port, int unit)
{
	struct maple_device *mdev;

	/* zero this out to avoid kobj subsystem
	 * thinking it has already been registered */

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return NULL;

	mdev->port = port;
	mdev->unit = unit;

	mdev->mq = maple_allocq(mdev);

	if (!mdev->mq) {
		kfree(mdev);
		return NULL;
	}
	mdev->dev.bus = &maple_bus_type;
	mdev->dev.parent = &maple_bus;
	init_waitqueue_head(&mdev->maple_wait);
	return mdev;
}
static void maple_free_dev(struct maple_device *mdev)
{
	kmem_cache_free(maple_queue_cache, mdev->mq->recvbuf);
	kfree(mdev->mq);
	kfree(mdev);
}
/* process the command queue into a maple command block
 * terminating command has bit 31 of first long set to 0
 */
static void maple_build_block(struct mapleq *mq)
{
	int port, unit, from, to, len;
	unsigned long *lsendbuf = mq->sendbuf;

	port = mq->dev->port & 3;
	unit = mq->dev->unit;
	len = mq->length;
	from = port << 6;
	to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);

	*maple_lastptr &= 0x7fffffff;
	maple_lastptr = maple_sendptr;

	*maple_sendptr++ = (port << 16) | len | 0x80000000;
	*maple_sendptr++ = virt_to_phys(mq->recvbuf->buf);
	*maple_sendptr++ =
	    mq->command | (to << 8) | (from << 16) | (len << 24);
	while (len-- > 0)
		*maple_sendptr++ = *lsendbuf++;
}
/* build up command queue */
static void maple_send(void)
{
	int i, maple_packets = 0;
	struct mapleq *mq, *nmq;

	if (!maple_dma_done())
		return;

	/* disable DMA */
	__raw_writel(0, MAPLE_ENABLE);

	if (!list_empty(&maple_sentq))
		goto finish;

	mutex_lock(&maple_wlist_lock);
	if (list_empty(&maple_waitq)) {
		mutex_unlock(&maple_wlist_lock);
		goto finish;
	}

	maple_lastptr = maple_sendbuf;
	maple_sendptr = maple_sendbuf;

	list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
		maple_build_block(mq);
		list_del_init(&mq->list);
		list_add_tail(&mq->list, &maple_sentq);
		if (maple_packets++ > MAPLE_MAXPACKETS)
			break;
	}
	mutex_unlock(&maple_wlist_lock);
	if (maple_packets > 0) {
		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
			dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
	}

finish:
	maple_dma_reset();
}
/* check if there is a driver registered likely to match this device */
static int maple_check_matching_driver(struct device_driver *driver,
				       void *devptr)
{
	struct maple_driver *maple_drv;
	struct maple_device *mdev;

	mdev = devptr;
	maple_drv = to_maple_driver(driver);
	if (mdev->devinfo.function & cpu_to_be32(maple_drv->function))
		return 1;
	return 0;
}
static void maple_detach_driver(struct maple_device *mdev)
{
	device_unregister(&mdev->dev);
}
/* process initial MAPLE_COMMAND_DEVINFO for each device or port */
static void maple_attach_driver(struct maple_device *mdev)
{
	char *p, *recvbuf;
	unsigned long function;
	int matched, error;

	recvbuf = mdev->mq->recvbuf->buf;
	/* copy the data as individual elements in
	 * case of memory optimisation */
	memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
	memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12);
	memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
	memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
	memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
	memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
	memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
	memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
	mdev->product_name[30] = '\0';
	memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60);
	mdev->product_licence[60] = '\0';

	for (p = mdev->product_name + 29; mdev->product_name <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;
	for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;

	function = be32_to_cpu(mdev->devinfo.function);

	dev_info(&mdev->dev, "detected %s: function 0x%lX: at (%d, %d)\n",
		mdev->product_name, function, mdev->port, mdev->unit);

	if (function > 0x200) {
		/* Do this silently - as not a real device */
		function = 0;
		mdev->driver = &maple_unsupported_device;
		dev_set_name(&mdev->dev, "%d:0.port", mdev->port);
	} else {
		matched =
			bus_for_each_drv(&maple_bus_type, NULL, mdev,
				maple_check_matching_driver);

		if (matched == 0) {
			/* Driver does not exist yet */
			dev_info(&mdev->dev, "no driver found\n");
			mdev->driver = &maple_unsupported_device;
		}
		dev_set_name(&mdev->dev, "%d:0%d.%lX", mdev->port,
			     mdev->unit, function);
	}

	mdev->function = function;
	mdev->dev.release = &maple_release_device;

	atomic_set(&mdev->busy, 0);
	error = device_register(&mdev->dev);
	if (error) {
		dev_warn(&mdev->dev, "could not register device at"
			" (%d, %d), with error 0x%X\n", mdev->unit,
			mdev->port, error);
		maple_free_dev(mdev);
		mdev = NULL;
	}
}
/*
 * if device has been registered for the given
 * port and unit then return 1 - allows identification
 * of which devices need to be attached or detached
 */
static int check_maple_device(struct device *device, void *portptr)
{
	struct maple_device_specify *ds;
	struct maple_device *mdev;

	ds = portptr;
	mdev = to_maple_dev(device);
	if (mdev->port == ds->port && mdev->unit == ds->unit)
		return 1;
	return 0;
}
static int setup_maple_commands(struct device *device, void *ignored)
{
	int add;
	struct maple_device *mdev = to_maple_dev(device);

	if (mdev->interval > 0 && atomic_read(&mdev->busy) == 0 &&
	    time_after(jiffies, mdev->when)) {
		/* bounce if we cannot add */
		add = maple_add_packet(mdev,
			be32_to_cpu(mdev->devinfo.function),
			MAPLE_COMMAND_GETCOND, 1, NULL);
		if (!add)
			mdev->when = jiffies + mdev->interval;
	} else {
		if (time_after(jiffies, maple_pnp_time))
			/* Ensure we don't have block reads and devinfo
			 * calls interfering with one another - so flag the
			 * device as busy */
			if (atomic_read(&mdev->busy) == 0) {
				atomic_set(&mdev->busy, 1);
				maple_add_packet(mdev, 0,
					MAPLE_COMMAND_DEVINFO, 0, NULL);
			}
	}
	return 0;
}
/* VBLANK bottom half - implemented via workqueue */
static void maple_vblank_handler(struct work_struct *work)
{
	int x, locking;
	struct maple_device *mdev;

	if (!maple_dma_done())
		return;

	__raw_writel(0, MAPLE_ENABLE);

	if (!list_empty(&maple_sentq))
		goto finish;

	/*
	 * Set up essential commands - to fetch data and
	 * check devices are still present
	 */
	bus_for_each_dev(&maple_bus_type, NULL, NULL,
		setup_maple_commands);

	if (time_after(jiffies, maple_pnp_time)) {
		/*
		 * Scan the empty ports - bus is flakey and may have
		 * mis-reported emptiness
		 */
		for (x = 0; x < MAPLE_PORTS; x++) {
			if (checked[x] && empty[x]) {
				mdev = baseunits[x];
				if (!mdev)
					break;
				atomic_set(&mdev->busy, 1);
				locking = maple_add_packet(mdev, 0,
					MAPLE_COMMAND_DEVINFO, 0, NULL);
				if (!locking)
					break;
			}
		}

		maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
	}

finish:
	maple_send();
}
/* handle devices added via hotplugs - placing them on queue for DEVINFO */
static void maple_map_subunits(struct maple_device *mdev, int submask)
{
	int retval, k, devcheck;
	struct maple_device *mdev_add;
	struct maple_device_specify ds;

	ds.port = mdev->port;
	for (k = 0; k < 5; k++) {
		ds.unit = k + 1;
		retval =
		    bus_for_each_dev(&maple_bus_type, NULL, &ds,
				     check_maple_device);
		if (retval) {
			submask = submask >> 1;
			continue;
		}
		devcheck = submask & 0x01;
		if (devcheck) {
			mdev_add = maple_alloc_dev(mdev->port, k + 1);
			if (!mdev_add)
				return;
			atomic_set(&mdev_add->busy, 1);
			maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO,
				0, NULL);
			/* mark that we are checking sub devices */
			scanning = 1;
		}
		submask = submask >> 1;
	}
}
/* mark a device as removed */
static void maple_clean_submap(struct maple_device *mdev)
{
	int killbit;

	killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
	killbit = ~killbit;
	killbit &= 0xFF;
	subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
}
/* handle empty port or hotplug removal */
static void maple_response_none(struct maple_device *mdev)
{
	maple_clean_submap(mdev);

	if (likely(mdev->unit != 0)) {
		/*
		 * Block devices play up
		 * and give the impression they have
		 * been removed even when still in place or
		 * trip the mtd layer when they have
		 * really gone - this code traps that eventuality
		 * and ensures we aren't overloaded with useless
		 * error messages
		 */
		if (mdev->can_unload) {
			if (!mdev->can_unload(mdev)) {
				atomic_set(&mdev->busy, 2);
				wake_up(&mdev->maple_wait);
				return;
			}
		}

		dev_info(&mdev->dev, "detaching device at (%d, %d)\n",
			mdev->port, mdev->unit);
		maple_detach_driver(mdev);
		return;
	} else {
		if (!started || !fullscan) {
			if (checked[mdev->port] == false) {
				checked[mdev->port] = true;
				empty[mdev->port] = true;
				dev_info(&mdev->dev, "no devices"
					" to port %d\n", mdev->port);
			}
			return;
		}
	}
	/* Some hardware devices generate false detach messages on unit 0 */
	atomic_set(&mdev->busy, 0);
}
/* preprocess hotplugs or scans */
static void maple_response_devinfo(struct maple_device *mdev,
				   char *recvbuf)
{
	char submask;

	if (!started || (scanning == 2) || !fullscan) {
		if ((mdev->unit == 0) && (checked[mdev->port] == false)) {
			checked[mdev->port] = true;
			maple_attach_driver(mdev);
		} else {
			if (mdev->unit != 0)
				maple_attach_driver(mdev);
			if (mdev->unit == 0) {
				empty[mdev->port] = false;
				maple_attach_driver(mdev);
			}
		}
	}
	if (mdev->unit == 0) {
		submask = recvbuf[2] & 0x1F;
		if (submask ^ subdevice_map[mdev->port]) {
			maple_map_subunits(mdev, submask);
			subdevice_map[mdev->port] = submask;
		}
	}
}
static void maple_response_fileerr(struct maple_device *mdev, void *recvbuf)
{
	if (mdev->fileerr_handler) {
		mdev->fileerr_handler(mdev, recvbuf);
		return;
	} else
		dev_warn(&mdev->dev, "device at (%d, %d) reports"
			" file error 0x%X\n", mdev->port, mdev->unit,
			((int *)recvbuf)[1]);
}
static void maple_port_rescan(void)
{
	int i;
	struct maple_device *mdev;

	fullscan = 1;
	for (i = 0; i < MAPLE_PORTS; i++) {
		if (checked[i] == false) {
			fullscan = 0;
			mdev = baseunits[i];
			maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO,
				0, NULL);
		}
	}
}
/* maple dma end bottom half - implemented via workqueue */
static void maple_dma_handler(struct work_struct *work)
{
	struct mapleq *mq, *nmq;
	struct maple_device *mdev;
	char *recvbuf;
	enum maple_code code;

	if (!maple_dma_done())
		return;
	__raw_writel(0, MAPLE_ENABLE);
	if (!list_empty(&maple_sentq)) {
		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
			mdev = mq->dev;
			recvbuf = mq->recvbuf->buf;
			dma_cache_sync(&mdev->dev, recvbuf, 0x400,
				DMA_FROM_DEVICE);
			code = recvbuf[0];
			kfree(mq->sendbuf);
			list_del_init(&mq->list);
			switch (code) {
			case MAPLE_RESPONSE_NONE:
				maple_response_none(mdev);
				break;

			case MAPLE_RESPONSE_DEVINFO:
				maple_response_devinfo(mdev, recvbuf);
				atomic_set(&mdev->busy, 0);
				break;

			case MAPLE_RESPONSE_DATATRF:
				if (mdev->callback)
					mdev->callback(mq);
				atomic_set(&mdev->busy, 0);
				wake_up(&mdev->maple_wait);
				break;

			case MAPLE_RESPONSE_FILEERR:
				maple_response_fileerr(mdev, recvbuf);
				atomic_set(&mdev->busy, 0);
				wake_up(&mdev->maple_wait);
				break;

			case MAPLE_RESPONSE_AGAIN:
			case MAPLE_RESPONSE_BADCMD:
			case MAPLE_RESPONSE_BADFUNC:
				dev_warn(&mdev->dev, "non-fatal error"
					" 0x%X at (%d, %d)\n", code,
					mdev->port, mdev->unit);
				atomic_set(&mdev->busy, 0);
				break;

			case MAPLE_RESPONSE_ALLINFO:
				dev_notice(&mdev->dev, "extended"
					" device information request for (%d, %d)"
					" but call is not supported\n", mdev->port,
					mdev->unit);
				atomic_set(&mdev->busy, 0);
				break;

			case MAPLE_RESPONSE_OK:
				atomic_set(&mdev->busy, 0);
				wake_up(&mdev->maple_wait);
				break;

			default:
				break;
			}
		}
		/* if scanning is 1 then we have subdevices to check */
		if (scanning == 1) {
			maple_send();
			scanning = 2;
		} else
			scanning = 0;
		/* check if we have actually tested all ports yet */
		if (!fullscan)
			maple_port_rescan();
		/* mark that we have been through the first scan */
		started = 1;
	}
	maple_send();
}
static irqreturn_t maple_dma_interrupt(int irq, void *dev_id)
{
	/* Load everything into the bottom half */
	schedule_work(&maple_dma_process);
	return IRQ_HANDLED;
}

static irqreturn_t maple_vblank_interrupt(int irq, void *dev_id)
{
	schedule_work(&maple_vblank_process);
	return IRQ_HANDLED;
}
static int maple_set_dma_interrupt_handler(void)
{
	return request_irq(HW_EVENT_MAPLE_DMA, maple_dma_interrupt,
		IRQF_SHARED, "maple bus DMA", &maple_unsupported_device);
}

static int maple_set_vblank_interrupt_handler(void)
{
	return request_irq(HW_EVENT_VSYNC, maple_vblank_interrupt,
		IRQF_SHARED, "maple bus VBLANK", &maple_unsupported_device);
}
static int maple_get_dma_buffer(void)
{
	maple_sendbuf =
	    (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
				      MAPLE_DMA_PAGES);
	if (!maple_sendbuf)
		return -ENOMEM;
	return 0;
}
static int maple_match_bus_driver(struct device *devptr,
				  struct device_driver *drvptr)
{
	struct maple_driver *maple_drv = to_maple_driver(drvptr);
	struct maple_device *maple_dev = to_maple_dev(devptr);

	/* Trap empty port case */
	if (maple_dev->devinfo.function == 0xFFFFFFFF)
		return 0;
	else if (maple_dev->devinfo.function &
		 cpu_to_be32(maple_drv->function))
		return 1;
	return 0;
}
static int maple_bus_uevent(struct device *dev,
			    struct kobj_uevent_env *env)
{
	return 0;
}

static void maple_bus_release(struct device *dev)
{
}
static struct maple_driver maple_unsupported_device = {
	.drv = {
		.name = "maple_unsupported_device",
		.bus = &maple_bus_type,
	},
};

/*
 * maple_bus_type - core maple bus structure
 */
struct bus_type maple_bus_type = {
	.name = "maple",
	.match = maple_match_bus_driver,
	.uevent = maple_bus_uevent,
};
EXPORT_SYMBOL_GPL(maple_bus_type);

static struct device maple_bus = {
	.init_name = "maple",
	.release = maple_bus_release,
};
static int __init maple_bus_init(void)
{
	int retval, i;
	struct maple_device *mdev[MAPLE_PORTS];

	__raw_writel(0, MAPLE_ENABLE);

	retval = device_register(&maple_bus);
	if (retval)
		goto cleanup;

	retval = bus_register(&maple_bus_type);
	if (retval)
		goto cleanup_device;

	retval = driver_register(&maple_unsupported_device.drv);
	if (retval)
		goto cleanup_bus;

	/* allocate memory for maple bus dma */
	retval = maple_get_dma_buffer();
	if (retval) {
		dev_err(&maple_bus, "failed to allocate DMA buffers\n");
		goto cleanup_basic;
	}

	/* set up DMA interrupt handler */
	retval = maple_set_dma_interrupt_handler();
	if (retval) {
		dev_err(&maple_bus, "bus failed to grab maple "
			"DMA IRQ\n");
		goto cleanup_dma;
	}

	/* set up VBLANK interrupt handler */
	retval = maple_set_vblank_interrupt_handler();
	if (retval) {
		dev_err(&maple_bus, "bus failed to grab VBLANK IRQ\n");
		goto cleanup_irq;
	}

	maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN);

	if (!maple_queue_cache)
		goto cleanup_bothirqs;

	INIT_LIST_HEAD(&maple_waitq);
	INIT_LIST_HEAD(&maple_sentq);

	/* setup maple ports */
	for (i = 0; i < MAPLE_PORTS; i++) {
		checked[i] = false;
		empty[i] = false;
		mdev[i] = maple_alloc_dev(i, 0);
		if (!mdev[i]) {
			while (i-- > 0)
				maple_free_dev(mdev[i]);
			goto cleanup_cache;
		}
		baseunits[i] = mdev[i];
		atomic_set(&mdev[i]->busy, 1);
		maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
		subdevice_map[i] = 0;
	}

	maple_pnp_time = jiffies + HZ;
	/* prepare initial queue */
	maple_send();
	dev_info(&maple_bus, "bus core now registered\n");

	return 0;

cleanup_cache:
	kmem_cache_destroy(maple_queue_cache);

cleanup_bothirqs:
	free_irq(HW_EVENT_VSYNC, 0);

cleanup_irq:
	free_irq(HW_EVENT_MAPLE_DMA, 0);

cleanup_dma:
	free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);

cleanup_basic:
	driver_unregister(&maple_unsupported_device.drv);

cleanup_bus:
	bus_unregister(&maple_bus_type);

cleanup_device:
	device_unregister(&maple_bus);

cleanup:
	printk(KERN_ERR "Maple bus registration failed\n");
	return retval;
}
/* Push init to later to ensure hardware gets detected */
fs_initcall(maple_bus_init);