/*
 * Core maple bus functionality
 *
 * Copyright (C) 2007 - 2009 Adrian McMenamin
 * Copyright (C) 2001 - 2008 Paul Mundt
 * Copyright (C) 2000 - 2001 YAEGASHI Takeshi
 * Copyright (C) 2001 M. R. Brown
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/maple.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <mach/dma.h>
#include <mach/sysasic.h>
MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>");
MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
MODULE_LICENSE("GPL v2");
MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");
static void maple_dma_handler(struct work_struct *work);
static void maple_vblank_handler(struct work_struct *work);

static DECLARE_WORK(maple_dma_process, maple_dma_handler);
static DECLARE_WORK(maple_vblank_process, maple_vblank_handler);

static LIST_HEAD(maple_waitq);
static LIST_HEAD(maple_sentq);

/* mutex to protect queue of waiting packets */
static DEFINE_MUTEX(maple_wlist_lock);
static struct maple_driver maple_unsupported_device;
static struct device maple_bus;
static int subdevice_map[MAPLE_PORTS];
static unsigned long *maple_sendbuf, *maple_sendptr, *maple_lastptr;
static unsigned long maple_pnp_time;
static int started, scanning, fullscan;
static struct kmem_cache *maple_queue_cache;

struct maple_device_specify {
	int port;
	int unit;
};

static bool checked[MAPLE_PORTS];
static bool empty[MAPLE_PORTS];
static struct maple_device *baseunits[MAPLE_PORTS];

/**
 * maple_driver_register - register a maple driver
 * @drv: maple driver to be registered.
 *
 * Registers the passed in @drv, while updating the bus type.
 * Devices with matching function IDs will be automatically probed.
 */
int maple_driver_register(struct maple_driver *drv)
{
	if (!drv)
		return -EINVAL;

	drv->drv.bus = &maple_bus_type;

	return driver_register(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_register);

/**
 * maple_driver_unregister - unregister a maple driver.
 * @drv: maple driver to unregister.
 *
 * Cleans up after maple_driver_register(). To be invoked in the exit
 * path of any module drivers.
 */
void maple_driver_unregister(struct maple_driver *drv)
{
	driver_unregister(&drv->drv);
}
EXPORT_SYMBOL_GPL(maple_driver_unregister);

/* set hardware registers to enable next round of dma */
static void maple_dma_reset(void)
{
	ctrl_outl(MAPLE_MAGIC, MAPLE_RESET);
	/* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
	ctrl_outl(1, MAPLE_TRIGTYPE);
	/*
	 * Maple system register
	 * bits 31 - 16	timeout in units of 20nsec
	 * bit 12	hard trigger - set 0 to keep responding to VBLANK
	 * bits 9 - 8	set 00 for 2 Mbps, 01 for 1 Mbps
	 * bits 3 - 0	delay (in 1.3ms) between VBLANK and start of DMA
	 */
	ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED);
	ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR);
	ctrl_outl(1, MAPLE_ENABLE);
}

/**
 * maple_getcond_callback - setup handling MAPLE_COMMAND_GETCOND
 * @dev: device responding
 * @callback: handler callback
 * @interval: interval in jiffies between callbacks
 * @function: the function code for the device
 */
void maple_getcond_callback(struct maple_device *dev,
			void (*callback) (struct mapleq *mq),
			unsigned long interval, unsigned long function)
{
	dev->callback = callback;
	dev->interval = interval;
	dev->function = cpu_to_be32(function);
	dev->when = jiffies;
}
EXPORT_SYMBOL_GPL(maple_getcond_callback);

static int maple_dma_done(void)
{
	return (ctrl_inl(MAPLE_STATE) & 1) == 0;
}

static void maple_release_device(struct device *dev)
{
	struct maple_device *mdev;
	struct mapleq *mq;

	mdev = to_maple_dev(dev);
	mq = mdev->mq;
	kmem_cache_free(maple_queue_cache, mq->recvbuf);
	kfree(mq);
	kfree(mdev);
}

/**
 * maple_add_packet - add a single instruction to the maple bus queue
 * @mdev: maple device
 * @function: function on device being queried
 * @command: maple command to add
 * @length: length of command string (in 32 bit words)
 * @data: remainder of command string
 */
int maple_add_packet(struct maple_device *mdev, u32 function, u32 command,
	size_t length, void *data)
{
	int ret = 0;
	void *sendbuf = NULL;

	if (length) {
		sendbuf = kzalloc(length * 4, GFP_KERNEL);
		if (!sendbuf) {
			ret = -ENOMEM;
			goto out;
		}
		((__be32 *)sendbuf)[0] = cpu_to_be32(function);
	}

	mdev->mq->command = command;
	mdev->mq->length = length;
	if (length > 1)
		memcpy(sendbuf + 4, data, (length - 1) * 4);
	mdev->mq->sendbuf = sendbuf;

	mutex_lock(&maple_wlist_lock);
	list_add_tail(&mdev->mq->list, &maple_waitq);
	mutex_unlock(&maple_wlist_lock);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(maple_add_packet);

static struct mapleq *maple_allocq(struct maple_device *mdev)
{
	struct mapleq *mq;

	mq = kzalloc(sizeof(*mq), GFP_KERNEL);
	if (!mq)
		goto failed_nomem;

	INIT_LIST_HEAD(&mq->list);
	mq->dev = mdev;
	mq->recvbuf = kmem_cache_zalloc(maple_queue_cache, GFP_KERNEL);
	if (!mq->recvbuf)
		goto failed_p2;
	mq->recvbuf->buf = &((mq->recvbuf->bufx)[0]);

	return mq;

failed_p2:
	kfree(mq);
failed_nomem:
	dev_err(&mdev->dev, "could not allocate memory for device (%d, %d)\n",
		mdev->port, mdev->unit);
	return NULL;
}

static struct maple_device *maple_alloc_dev(int port, int unit)
{
	struct maple_device *mdev;

	/* zero this out to avoid kobj subsystem
	 * thinking it has already been registered */
	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return NULL;

	mdev->port = port;
	mdev->unit = unit;

	mdev->mq = maple_allocq(mdev);
	if (!mdev->mq) {
		kfree(mdev);
		return NULL;
	}
	mdev->dev.bus = &maple_bus_type;
	mdev->dev.parent = &maple_bus;
	init_waitqueue_head(&mdev->maple_wait);
	return mdev;
}

static void maple_free_dev(struct maple_device *mdev)
{
	kmem_cache_free(maple_queue_cache, mdev->mq->recvbuf);
	kfree(mdev->mq);
	kfree(mdev);
}

/* process the command queue into a maple command block
 * the last command in the block has bit 31 of its first long set
 * (earlier commands have it cleared retroactively via maple_lastptr)
 */
static void maple_build_block(struct mapleq *mq)
{
	int port, unit, from, to, len;
	unsigned long *lsendbuf = mq->sendbuf;

	port = mq->dev->port & 3;
	unit = mq->dev->unit;
	len = mq->length;
	from = port << 6;
	to = (port << 6) | (unit > 0 ? (1 << (unit - 1)) & 0x1f : 0x20);

	*maple_lastptr &= 0x7fffffff;
	maple_lastptr = maple_sendptr;

	*maple_sendptr++ = (port << 16) | len | 0x80000000;
	*maple_sendptr++ = PHYSADDR(mq->recvbuf->buf);
	*maple_sendptr++ =
	    mq->command | (to << 8) | (from << 16) | (len << 24);
	while (len-- > 0)
		*maple_sendptr++ = *lsendbuf++;
}

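/*
 * Resulting frame layout, as built above (descriptive only, derived from
 * the code in maple_build_block):
 *
 *	word 0: transfer descriptor - (port << 16) | len, with bit 31 set;
 *		the bit survives only on the final frame, since appending
 *		the next frame clears it through maple_lastptr
 *	word 1: physical address of the receive buffer
 *	word 2: command | (recipient << 8) | (sender << 16) | (len << 24)
 *	words 3 onwards: len words of payload copied from mq->sendbuf
 */
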
/* build up command queue */
static void maple_send(void)
{
	int i, maple_packets = 0;
	struct mapleq *mq, *nmq;

	if (!maple_dma_done())
		return;

	ctrl_outl(0, MAPLE_ENABLE);

	if (!list_empty(&maple_sentq))
		goto finish;

	mutex_lock(&maple_wlist_lock);
	if (list_empty(&maple_waitq)) {
		mutex_unlock(&maple_wlist_lock);
		goto finish;
	}

	maple_lastptr = maple_sendbuf;
	maple_sendptr = maple_sendbuf;

	list_for_each_entry_safe(mq, nmq, &maple_waitq, list) {
		maple_build_block(mq);
		list_del_init(&mq->list);
		list_add_tail(&mq->list, &maple_sentq);
		if (maple_packets++ > MAPLE_MAXPACKETS)
			break;
	}
	mutex_unlock(&maple_wlist_lock);
	if (maple_packets > 0) {
		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
			dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE,
				       PAGE_SIZE, DMA_BIDIRECTIONAL);
	}

finish:
	maple_dma_reset();
}

/* check if there is a driver registered likely to match this device */
static int maple_check_matching_driver(struct device_driver *driver,
					void *devptr)
{
	struct maple_driver *maple_drv;
	struct maple_device *mdev;

	mdev = devptr;
	maple_drv = to_maple_driver(driver);
	if (mdev->devinfo.function & cpu_to_be32(maple_drv->function))
		return 1;
	return 0;
}

static void maple_detach_driver(struct maple_device *mdev)
{
	device_unregister(&mdev->dev);
}

/* process initial MAPLE_COMMAND_DEVINFO for each device or port */
static void maple_attach_driver(struct maple_device *mdev)
{
	char *p, *recvbuf;
	unsigned long function;
	int matched, error;

	recvbuf = mdev->mq->recvbuf->buf;
	/* copy the data as individual elements in
	 * case of memory optimisation */
	memcpy(&mdev->devinfo.function, recvbuf + 4, 4);
	memcpy(&mdev->devinfo.function_data[0], recvbuf + 8, 12);
	memcpy(&mdev->devinfo.area_code, recvbuf + 20, 1);
	memcpy(&mdev->devinfo.connector_direction, recvbuf + 21, 1);
	memcpy(&mdev->devinfo.product_name[0], recvbuf + 22, 30);
	memcpy(&mdev->devinfo.standby_power, recvbuf + 112, 2);
	memcpy(&mdev->devinfo.max_power, recvbuf + 114, 2);
	memcpy(mdev->product_name, mdev->devinfo.product_name, 30);
	mdev->product_name[30] = '\0';
	memcpy(mdev->product_licence, mdev->devinfo.product_licence, 60);
	mdev->product_licence[60] = '\0';

	/* trim trailing spaces from the identification strings */
	for (p = mdev->product_name + 29; mdev->product_name <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;
	for (p = mdev->product_licence + 59; mdev->product_licence <= p; p--)
		if (*p == ' ')
			*p = '\0';
		else
			break;

	function = be32_to_cpu(mdev->devinfo.function);

	dev_info(&mdev->dev, "detected %s: function 0x%lX: at (%d, %d)\n",
		mdev->product_name, function, mdev->port, mdev->unit);

	if (function > 0x200) {
		/* Do this silently - as not a real device */
		function = 0;
		mdev->driver = &maple_unsupported_device;
		dev_set_name(&mdev->dev, "%d:0.port", mdev->port);
	} else {
		matched = bus_for_each_drv(&maple_bus_type, NULL, mdev,
				maple_check_matching_driver);

		if (matched == 0) {
			/* Driver does not exist yet */
			dev_info(&mdev->dev, "no driver found\n");
			mdev->driver = &maple_unsupported_device;
		}
		dev_set_name(&mdev->dev, "%d:0%d.%lX", mdev->port,
			     mdev->unit, function);
	}

	mdev->function = function;
	mdev->dev.release = &maple_release_device;

	atomic_set(&mdev->busy, 0);
	error = device_register(&mdev->dev);
	if (error) {
		dev_warn(&mdev->dev, "could not register device at"
			" (%d, %d), with error 0x%X\n", mdev->unit,
			mdev->port, error);
		maple_free_dev(mdev);
		mdev = NULL;
	}
}

/*
 * if device has been registered for the given
 * port and unit then return 1 - allows identification
 * of which devices need to be attached or detached
 */
static int check_maple_device(struct device *device, void *portptr)
{
	struct maple_device_specify *ds;
	struct maple_device *mdev;

	ds = portptr;
	mdev = to_maple_dev(device);
	if (mdev->port == ds->port && mdev->unit == ds->unit)
		return 1;
	return 0;
}

static int setup_maple_commands(struct device *device, void *ignored)
{
	int add;
	struct maple_device *mdev = to_maple_dev(device);

	if (mdev->interval > 0 && atomic_read(&mdev->busy) == 0 &&
	    time_after(jiffies, mdev->when)) {
		/* bounce if we cannot add */
		add = maple_add_packet(mdev,
			be32_to_cpu(mdev->devinfo.function),
			MAPLE_COMMAND_GETCOND, 1, NULL);
		if (!add)
			mdev->when = jiffies + mdev->interval;
	} else {
		if (time_after(jiffies, maple_pnp_time))
			/* Ensure we don't have block reads and devinfo
			 * calls interfering with one another - so flag the
			 * device as busy */
			if (atomic_read(&mdev->busy) == 0) {
				atomic_set(&mdev->busy, 1);
				maple_add_packet(mdev, 0,
					MAPLE_COMMAND_DEVINFO, 0, NULL);
			}
	}
	return 0;
}

/* VBLANK bottom half - implemented via workqueue */
static void maple_vblank_handler(struct work_struct *work)
{
	int x, locking;
	struct maple_device *mdev;

	if (!maple_dma_done())
		return;

	ctrl_outl(0, MAPLE_ENABLE);

	if (!list_empty(&maple_sentq))
		goto finish;

	/*
	 * Set up essential commands - to fetch data and
	 * check devices are still present
	 */
	bus_for_each_dev(&maple_bus_type, NULL, NULL,
		setup_maple_commands);

	if (time_after(jiffies, maple_pnp_time)) {
		/*
		 * Scan the empty ports - bus is flaky and may have
		 * mis-reported emptiness
		 */
		for (x = 0; x < MAPLE_PORTS; x++) {
			if (checked[x] && empty[x]) {
				mdev = baseunits[x];
				if (!mdev)
					break;
				atomic_set(&mdev->busy, 1);
				locking = maple_add_packet(mdev, 0,
					MAPLE_COMMAND_DEVINFO, 0, NULL);
				if (locking)
					break;
			}
		}

		maple_pnp_time = jiffies + MAPLE_PNP_INTERVAL;
	}

finish:
	maple_dma_reset();
}

/* handle devices added via hotplugs - placing them on queue for DEVINFO */
static void maple_map_subunits(struct maple_device *mdev, int submask)
{
	int retval, k, devcheck;
	struct maple_device *mdev_add;
	struct maple_device_specify ds;

	ds.port = mdev->port;
	for (k = 0; k < 5; k++) {
		ds.unit = k + 1;
		retval = bus_for_each_dev(&maple_bus_type, NULL, &ds,
				check_maple_device);
		if (retval) {
			submask = submask >> 1;
			continue;
		}
		devcheck = submask & 0x01;
		if (devcheck) {
			mdev_add = maple_alloc_dev(mdev->port, k + 1);
			if (!mdev_add)
				return;
			atomic_set(&mdev_add->busy, 1);
			maple_add_packet(mdev_add, 0, MAPLE_COMMAND_DEVINFO,
				0, NULL);
			/* mark that we are checking sub devices */
			scanning = 1;
		}
		submask = submask >> 1;
	}
}

/* mark a device as removed */
static void maple_clean_submap(struct maple_device *mdev)
{
	int killbit;

	killbit = (mdev->unit > 0 ? (1 << (mdev->unit - 1)) & 0x1f : 0x20);
	killbit = ~killbit;
	killbit &= 0xFF;
	subdevice_map[mdev->port] = subdevice_map[mdev->port] & killbit;
}

/* handle empty port or hotplug removal */
static void maple_response_none(struct maple_device *mdev)
{
	maple_clean_submap(mdev);

	if (likely(mdev->unit != 0)) {
		/*
		 * Block devices play up
		 * and give the impression they have
		 * been removed even when still in place or
		 * trip the mtd layer when they have
		 * really gone - this code traps that eventuality
		 * and ensures we aren't overloaded with useless
		 * error messages
		 */
		if (mdev->can_unload) {
			if (!mdev->can_unload(mdev)) {
				atomic_set(&mdev->busy, 2);
				wake_up(&mdev->maple_wait);
				return;
			}
		}

		dev_info(&mdev->dev, "detaching device at (%d, %d)\n",
			mdev->port, mdev->unit);
		maple_detach_driver(mdev);
		return;
	} else {
		if (!started || !fullscan) {
			if (checked[mdev->port] == false) {
				checked[mdev->port] = true;
				empty[mdev->port] = true;
				dev_info(&mdev->dev, "no devices"
					" to port %d\n", mdev->port);
			}
			return;
		}
	}
	/* Some hardware devices generate false detach messages on unit 0 */
	atomic_set(&mdev->busy, 0);
}

/* preprocess hotplugs or scans */
static void maple_response_devinfo(struct maple_device *mdev,
				   char *recvbuf)
{
	char submask;

	if (!started || (scanning == 2) || !fullscan) {
		if ((mdev->unit == 0) && (checked[mdev->port] == false)) {
			checked[mdev->port] = true;
			maple_attach_driver(mdev);
		} else {
			if (mdev->unit != 0)
				maple_attach_driver(mdev);
			if (mdev->unit == 0) {
				empty[mdev->port] = false;
				maple_attach_driver(mdev);
			}
		}
	}
	if (mdev->unit == 0) {
		submask = recvbuf[2] & 0x1F;
		if (submask ^ subdevice_map[mdev->port]) {
			maple_map_subunits(mdev, submask);
			subdevice_map[mdev->port] = submask;
		}
	}
}

static void maple_response_fileerr(struct maple_device *mdev, void *recvbuf)
{
	if (mdev->fileerr_handler) {
		mdev->fileerr_handler(mdev, recvbuf);
		return;
	} else
		dev_warn(&mdev->dev, "device at (%d, %d) reports"
			" file error 0x%X\n", mdev->port, mdev->unit,
			((int *)recvbuf)[1]);
}

static void maple_port_rescan(void)
{
	int i;
	struct maple_device *mdev;

	fullscan = 1;
	for (i = 0; i < MAPLE_PORTS; i++) {
		if (checked[i] == false) {
			fullscan = 0;
			mdev = baseunits[i];
			maple_add_packet(mdev, 0, MAPLE_COMMAND_DEVINFO,
				0, NULL);
		}
	}
}

/* maple dma end bottom half - implemented via workqueue */
static void maple_dma_handler(struct work_struct *work)
{
	struct mapleq *mq, *nmq;
	struct maple_device *mdev;
	char *recvbuf;
	enum maple_code code;

	if (!maple_dma_done())
		return;
	ctrl_outl(0, MAPLE_ENABLE);
	if (!list_empty(&maple_sentq)) {
		list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
			mdev = mq->dev;
			recvbuf = mq->recvbuf->buf;
			dma_cache_sync(&mdev->dev, recvbuf, 0x400,
				DMA_FROM_DEVICE);
			code = recvbuf[0];
			kfree(mq->sendbuf);
			list_del_init(&mq->list);
			switch (code) {
			case MAPLE_RESPONSE_NONE:
				maple_response_none(mdev);
				break;

			case MAPLE_RESPONSE_DEVINFO:
				maple_response_devinfo(mdev, recvbuf);
				atomic_set(&mdev->busy, 0);
				break;

			case MAPLE_RESPONSE_DATATRF:
				if (mdev->callback)
					mdev->callback(mq);
				atomic_set(&mdev->busy, 0);
				wake_up(&mdev->maple_wait);
				break;

			case MAPLE_RESPONSE_FILEERR:
				maple_response_fileerr(mdev, recvbuf);
				atomic_set(&mdev->busy, 0);
				wake_up(&mdev->maple_wait);
				break;

			case MAPLE_RESPONSE_AGAIN:
			case MAPLE_RESPONSE_BADCMD:
			case MAPLE_RESPONSE_BADFUNC:
				dev_warn(&mdev->dev, "non-fatal error"
					" 0x%X at (%d, %d)\n", code,
					mdev->port, mdev->unit);
				atomic_set(&mdev->busy, 0);
				break;

			case MAPLE_RESPONSE_ALLINFO:
				dev_notice(&mdev->dev, "extended"
					" device information request for (%d, %d)"
					" but call is not supported\n", mdev->port,
					mdev->unit);
				atomic_set(&mdev->busy, 0);
				break;

			case MAPLE_RESPONSE_OK:
				atomic_set(&mdev->busy, 0);
				wake_up(&mdev->maple_wait);
				break;
			}
		}
		/* if scanning is 1 then we have subdevices to check */
		if (scanning == 1) {
			maple_send();
			scanning = 2;
		} else
			scanning = 0;
		/* check if we have actually tested all ports yet */
		if (!fullscan)
			maple_port_rescan();
		/* mark that we have been through the first scan */
		started = 1;
	}
	maple_send();
}

static irqreturn_t maple_dma_interrupt(int irq, void *dev_id)
{
	/* Load everything into the bottom half */
	schedule_work(&maple_dma_process);
	return IRQ_HANDLED;
}

static irqreturn_t maple_vblank_interrupt(int irq, void *dev_id)
{
	schedule_work(&maple_vblank_process);
	return IRQ_HANDLED;
}

static int maple_set_dma_interrupt_handler(void)
{
	return request_irq(HW_EVENT_MAPLE_DMA, maple_dma_interrupt,
		IRQF_SHARED, "maple bus DMA", &maple_unsupported_device);
}

static int maple_set_vblank_interrupt_handler(void)
{
	return request_irq(HW_EVENT_VSYNC, maple_vblank_interrupt,
		IRQF_SHARED, "maple bus VBLANK", &maple_unsupported_device);
}

static int maple_get_dma_buffer(void)
{
	maple_sendbuf =
	    (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
		MAPLE_DMA_PAGES);
	if (!maple_sendbuf)
		return -ENOMEM;
	return 0;
}

static int maple_match_bus_driver(struct device *devptr,
				  struct device_driver *drvptr)
{
	struct maple_driver *maple_drv = to_maple_driver(drvptr);
	struct maple_device *maple_dev = to_maple_dev(devptr);

	/* Trap empty port case */
	if (maple_dev->devinfo.function == 0xFFFFFFFF)
		return 0;
	else if (maple_dev->devinfo.function &
		 cpu_to_be32(maple_drv->function))
		return 1;
	return 0;
}

static int maple_bus_uevent(struct device *dev,
			    struct kobj_uevent_env *env)
{
	return 0;
}

static void maple_bus_release(struct device *dev)
{
}

static struct maple_driver maple_unsupported_device = {
	.drv = {
		.name = "maple_unsupported_device",
		.bus = &maple_bus_type,
	},
};

/**
 * maple_bus_type - core maple bus structure
 */
struct bus_type maple_bus_type = {
	.name = "maple",
	.match = maple_match_bus_driver,
	.uevent = maple_bus_uevent,
};
EXPORT_SYMBOL_GPL(maple_bus_type);

static struct device maple_bus = {
	.init_name = "maple",
	.release = maple_bus_release,
};

static int __init maple_bus_init(void)
{
	int retval, i;
	struct maple_device *mdev[MAPLE_PORTS];

	ctrl_outl(0, MAPLE_ENABLE);

	retval = device_register(&maple_bus);
	if (retval)
		goto cleanup;

	retval = bus_register(&maple_bus_type);
	if (retval)
		goto cleanup_device;

	retval = driver_register(&maple_unsupported_device.drv);
	if (retval)
		goto cleanup_bus;

	/* allocate memory for maple bus dma */
	retval = maple_get_dma_buffer();
	if (retval) {
		dev_err(&maple_bus, "failed to allocate DMA buffers\n");
		goto cleanup_basic;
	}

	/* set up DMA interrupt handler */
	retval = maple_set_dma_interrupt_handler();
	if (retval) {
		dev_err(&maple_bus, "bus failed to grab maple "
			"DMA IRQ\n");
		goto cleanup_dma;
	}

	/* set up VBLANK interrupt handler */
	retval = maple_set_vblank_interrupt_handler();
	if (retval) {
		dev_err(&maple_bus, "bus failed to grab VBLANK IRQ\n");
		goto cleanup_irq;
	}

	maple_queue_cache = KMEM_CACHE(maple_buffer, SLAB_HWCACHE_ALIGN);

	if (!maple_queue_cache)
		goto cleanup_bothirqs;

	INIT_LIST_HEAD(&maple_waitq);
	INIT_LIST_HEAD(&maple_sentq);

	/* setup maple ports */
	for (i = 0; i < MAPLE_PORTS; i++) {
		checked[i] = false;
		empty[i] = false;
		mdev[i] = maple_alloc_dev(i, 0);
		if (!mdev[i]) {
			while (i-- > 0)
				maple_free_dev(mdev[i]);
			goto cleanup_cache;
		}
		baseunits[i] = mdev[i];
		atomic_set(&mdev[i]->busy, 1);
		maple_add_packet(mdev[i], 0, MAPLE_COMMAND_DEVINFO, 0, NULL);
		subdevice_map[i] = 0;
	}

	maple_pnp_time = jiffies + HZ;
	/* prepare initial queue */
	maple_send();
	dev_info(&maple_bus, "bus core now registered\n");

	return 0;

cleanup_cache:
	kmem_cache_destroy(maple_queue_cache);

cleanup_bothirqs:
	free_irq(HW_EVENT_VSYNC, 0);

cleanup_irq:
	free_irq(HW_EVENT_MAPLE_DMA, 0);

cleanup_dma:
	free_pages((unsigned long) maple_sendbuf, MAPLE_DMA_PAGES);

cleanup_basic:
	driver_unregister(&maple_unsupported_device.drv);

cleanup_bus:
	bus_unregister(&maple_bus_type);

cleanup_device:
	device_unregister(&maple_bus);

cleanup:
	printk(KERN_ERR "Maple bus registration failed\n");
	return retval;
}
/* Push init to later to ensure hardware gets detected */
fs_initcall(maple_bus_init);