/*
 * Core maple bus functionality
 *
 *  Copyright (C) 2007, 2008 Adrian McMenamin
 *  Copyright (C) 2001 - 2008 Paul Mundt
 *
 * Based on 2.4 code by:
 *
 *  Copyright (C) 2000-2001 YAEGASHI Takeshi
 *  Copyright (C) 2001 M. R. Brown
 *  Copyright (C) 2001 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/maple.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <mach/dma.h>
#include <mach/sysasic.h>
35 MODULE_AUTHOR("Yaegashi Takeshi, Paul Mundt, M. R. Brown, Adrian McMenamin");
36 MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
37 MODULE_LICENSE("GPL v2");
38 MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");
40 static void maple_dma_handler(struct work_struct
*work
);
41 static void maple_vblank_handler(struct work_struct
*work
);
43 static DECLARE_WORK(maple_dma_process
, maple_dma_handler
);
44 static DECLARE_WORK(maple_vblank_process
, maple_vblank_handler
);
46 static LIST_HEAD(maple_waitq
);
47 static LIST_HEAD(maple_sentq
);
49 /* mutex to protect queue of waiting packets */
50 static DEFINE_MUTEX(maple_wlist_lock
);
52 static struct maple_driver maple_dummy_driver
;
53 static struct device maple_bus
;
54 static int subdevice_map
[MAPLE_PORTS
];
55 static unsigned long *maple_sendbuf
, *maple_sendptr
, *maple_lastptr
;
56 static unsigned long maple_pnp_time
;
57 static int started
, scanning
, fullscan
;
58 static struct kmem_cache
*maple_queue_cache
;
60 struct maple_device_specify
{
65 static bool checked
[4];
66 static struct maple_device
*baseunits
[4];
69 * maple_driver_register - register a maple driver
70 * @drv: maple driver to be registered.
72 * Registers the passed in @drv, while updating the bus type.
73 * Devices with matching function IDs will be automatically probed.
75 int maple_driver_register(struct maple_driver
*drv
)
80 drv
->drv
.bus
= &maple_bus_type
;
82 return driver_register(&drv
->drv
);
84 EXPORT_SYMBOL_GPL(maple_driver_register
);
87 * maple_driver_unregister - unregister a maple driver.
88 * @drv: maple driver to unregister.
90 * Cleans up after maple_driver_register(). To be invoked in the exit
91 * path of any module drivers.
93 void maple_driver_unregister(struct maple_driver
*drv
)
95 driver_unregister(&drv
->drv
);
97 EXPORT_SYMBOL_GPL(maple_driver_unregister
);
99 /* set hardware registers to enable next round of dma */
100 static void maplebus_dma_reset(void)
102 ctrl_outl(MAPLE_MAGIC
, MAPLE_RESET
);
103 /* set trig type to 0 for software trigger, 1 for hardware (VBLANK) */
104 ctrl_outl(1, MAPLE_TRIGTYPE
);
105 ctrl_outl(MAPLE_2MBPS
| MAPLE_TIMEOUT(50000), MAPLE_SPEED
);
106 ctrl_outl(PHYSADDR(maple_sendbuf
), MAPLE_DMAADDR
);
107 ctrl_outl(1, MAPLE_ENABLE
);
111 * maple_getcond_callback - setup handling MAPLE_COMMAND_GETCOND
112 * @dev: device responding
113 * @callback: handler callback
114 * @interval: interval in jiffies between callbacks
115 * @function: the function code for the device
117 void maple_getcond_callback(struct maple_device
*dev
,
118 void (*callback
) (struct mapleq
*mq
),
119 unsigned long interval
, unsigned long function
)
121 dev
->callback
= callback
;
122 dev
->interval
= interval
;
123 dev
->function
= cpu_to_be32(function
);
126 EXPORT_SYMBOL_GPL(maple_getcond_callback
);
128 static int maple_dma_done(void)
130 return (ctrl_inl(MAPLE_STATE
) & 1) == 0;
133 static void maple_release_device(struct device
*dev
)
135 struct maple_device
*mdev
;
139 mdev
= to_maple_dev(dev
);
143 kmem_cache_free(maple_queue_cache
, mq
->recvbufdcsp
);
151 * maple_add_packet - add a single instruction to the queue
152 * @mdev: maple device
153 * @function: function on device being queried
154 * @command: maple command to add
155 * @length: length of command string (in 32 bit words)
156 * @data: remainder of command string
158 int maple_add_packet(struct maple_device
*mdev
, u32 function
, u32 command
,
159 size_t length
, void *data
)
161 int locking
, ret
= 0;
162 void *sendbuf
= NULL
;
164 mutex_lock(&maple_wlist_lock
);
165 /* bounce if device already locked */
166 locking
= mutex_is_locked(&mdev
->mq
->mutex
);
172 mutex_lock(&mdev
->mq
->mutex
);
175 sendbuf
= kmalloc(length
* 4, GFP_KERNEL
);
177 mutex_unlock(&mdev
->mq
->mutex
);
181 ((__be32
*)sendbuf
)[0] = cpu_to_be32(function
);
184 mdev
->mq
->command
= command
;
185 mdev
->mq
->length
= length
;
187 memcpy(sendbuf
+ 4, data
, (length
- 1) * 4);
188 mdev
->mq
->sendbuf
= sendbuf
;
190 list_add(&mdev
->mq
->list
, &maple_waitq
);
192 mutex_unlock(&maple_wlist_lock
);
195 EXPORT_SYMBOL_GPL(maple_add_packet
);
198 * maple_add_packet_sleeps - add a single instruction to the queue
199 * @mdev: maple device
200 * @function: function on device being queried
201 * @command: maple command to add
202 * @length: length of command string (in 32 bit words)
203 * @data: remainder of command string
205 * Same as maple_add_packet(), but waits for the lock to become free.
207 int maple_add_packet_sleeps(struct maple_device
*mdev
, u32 function
,
208 u32 command
, size_t length
, void *data
)
210 int locking
, ret
= 0;
211 void *sendbuf
= NULL
;
213 locking
= mutex_lock_interruptible(&mdev
->mq
->mutex
);
220 sendbuf
= kmalloc(length
* 4, GFP_KERNEL
);
222 mutex_unlock(&mdev
->mq
->mutex
);
226 ((__be32
*)sendbuf
)[0] = cpu_to_be32(function
);
229 mdev
->mq
->command
= command
;
230 mdev
->mq
->length
= length
;
232 memcpy(sendbuf
+ 4, data
, (length
- 1) * 4);
233 mdev
->mq
->sendbuf
= sendbuf
;
235 mutex_lock(&maple_wlist_lock
);
236 list_add(&mdev
->mq
->list
, &maple_waitq
);
237 mutex_unlock(&maple_wlist_lock
);
241 EXPORT_SYMBOL_GPL(maple_add_packet_sleeps
);
243 static struct mapleq
*maple_allocq(struct maple_device
*mdev
)
247 mq
= kmalloc(sizeof(*mq
), GFP_KERNEL
);
252 mq
->recvbufdcsp
= kmem_cache_zalloc(maple_queue_cache
, GFP_KERNEL
);
253 mq
->recvbuf
= (void *) P2SEGADDR(mq
->recvbufdcsp
);
257 * most devices do not need the mutex - but
258 * anything that injects block reads or writes
261 mutex_init(&mq
->mutex
);
271 static struct maple_device
*maple_alloc_dev(int port
, int unit
)
273 struct maple_device
*mdev
;
275 mdev
= kzalloc(sizeof(*mdev
), GFP_KERNEL
);
281 mdev
->mq
= maple_allocq(mdev
);
287 mdev
->dev
.bus
= &maple_bus_type
;
288 mdev
->dev
.parent
= &maple_bus
;
292 static void maple_free_dev(struct maple_device
*mdev
)
297 if (mdev
->mq
->recvbufdcsp
)
298 kmem_cache_free(maple_queue_cache
,
299 mdev
->mq
->recvbufdcsp
);
305 /* process the command queue into a maple command block
306 * terminating command has bit 32 of first long set to 0
308 static void maple_build_block(struct mapleq
*mq
)
310 int port
, unit
, from
, to
, len
;
311 unsigned long *lsendbuf
= mq
->sendbuf
;
313 port
= mq
->dev
->port
& 3;
314 unit
= mq
->dev
->unit
;
317 to
= (port
<< 6) | (unit
> 0 ? (1 << (unit
- 1)) & 0x1f : 0x20);
319 *maple_lastptr
&= 0x7fffffff;
320 maple_lastptr
= maple_sendptr
;
322 *maple_sendptr
++ = (port
<< 16) | len
| 0x80000000;
323 *maple_sendptr
++ = PHYSADDR(mq
->recvbuf
);
325 mq
->command
| (to
<< 8) | (from
<< 16) | (len
<< 24);
327 *maple_sendptr
++ = *lsendbuf
++;
330 /* build up command queue */
331 static void maple_send(void)
333 int i
, maple_packets
= 0;
334 struct mapleq
*mq
, *nmq
;
336 if (!list_empty(&maple_sentq
))
338 mutex_lock(&maple_wlist_lock
);
339 if (list_empty(&maple_waitq
) || !maple_dma_done()) {
340 mutex_unlock(&maple_wlist_lock
);
343 mutex_unlock(&maple_wlist_lock
);
344 maple_lastptr
= maple_sendbuf
;
345 maple_sendptr
= maple_sendbuf
;
346 mutex_lock(&maple_wlist_lock
);
347 list_for_each_entry_safe(mq
, nmq
, &maple_waitq
, list
) {
348 maple_build_block(mq
);
349 list_move(&mq
->list
, &maple_sentq
);
350 if (maple_packets
++ > MAPLE_MAXPACKETS
)
353 mutex_unlock(&maple_wlist_lock
);
354 if (maple_packets
> 0) {
355 for (i
= 0; i
< (1 << MAPLE_DMA_PAGES
); i
++)
356 dma_cache_sync(0, maple_sendbuf
+ i
* PAGE_SIZE
,
357 PAGE_SIZE
, DMA_BIDIRECTIONAL
);
361 /* check if there is a driver registered likely to match this device */
362 static int check_matching_maple_driver(struct device_driver
*driver
,
365 struct maple_driver
*maple_drv
;
366 struct maple_device
*mdev
;
369 maple_drv
= to_maple_driver(driver
);
370 if (mdev
->devinfo
.function
& cpu_to_be32(maple_drv
->function
))
375 static void maple_detach_driver(struct maple_device
*mdev
)
379 device_unregister(&mdev
->dev
);
383 /* process initial MAPLE_COMMAND_DEVINFO for each device or port */
384 static void maple_attach_driver(struct maple_device
*mdev
)
387 unsigned long function
;
390 recvbuf
= mdev
->mq
->recvbuf
;
391 /* copy the data as individual elements in
392 * case of memory optimisation */
393 memcpy(&mdev
->devinfo
.function
, recvbuf
+ 4, 4);
394 memcpy(&mdev
->devinfo
.function_data
[0], recvbuf
+ 8, 12);
395 memcpy(&mdev
->devinfo
.area_code
, recvbuf
+ 20, 1);
396 memcpy(&mdev
->devinfo
.connector_direction
, recvbuf
+ 21, 1);
397 memcpy(&mdev
->devinfo
.product_name
[0], recvbuf
+ 22, 30);
398 memcpy(&mdev
->devinfo
.product_licence
[0], recvbuf
+ 52, 60);
399 memcpy(&mdev
->devinfo
.standby_power
, recvbuf
+ 112, 2);
400 memcpy(&mdev
->devinfo
.max_power
, recvbuf
+ 114, 2);
401 memcpy(mdev
->product_name
, mdev
->devinfo
.product_name
, 30);
402 mdev
->product_name
[30] = '\0';
403 memcpy(mdev
->product_licence
, mdev
->devinfo
.product_licence
, 60);
404 mdev
->product_licence
[60] = '\0';
406 for (p
= mdev
->product_name
+ 29; mdev
->product_name
<= p
; p
--)
411 for (p
= mdev
->product_licence
+ 59; mdev
->product_licence
<= p
; p
--)
417 printk(KERN_INFO
"Maple device detected: %s\n",
419 printk(KERN_INFO
"Maple device: %s\n", mdev
->product_licence
);
421 function
= be32_to_cpu(mdev
->devinfo
.function
);
423 if (function
> 0x200) {
424 /* Do this silently - as not a real device */
426 mdev
->driver
= &maple_dummy_driver
;
427 sprintf(mdev
->dev
.bus_id
, "%d:0.port", mdev
->port
);
430 "Maple bus at (%d, %d): Function 0x%lX\n",
431 mdev
->port
, mdev
->unit
, function
);
434 bus_for_each_drv(&maple_bus_type
, NULL
, mdev
,
435 check_matching_maple_driver
);
438 /* Driver does not exist yet */
440 "No maple driver found.\n");
441 mdev
->driver
= &maple_dummy_driver
;
443 sprintf(mdev
->dev
.bus_id
, "%d:0%d.%lX", mdev
->port
,
444 mdev
->unit
, function
);
446 mdev
->function
= function
;
447 mdev
->dev
.release
= &maple_release_device
;
448 retval
= device_register(&mdev
->dev
);
451 "Maple bus: Attempt to register device"
452 " (%x, %x) failed.\n",
453 mdev
->port
, mdev
->unit
);
454 maple_free_dev(mdev
);
461 * if device has been registered for the given
462 * port and unit then return 1 - allows identification
463 * of which devices need to be attached or detached
465 static int detach_maple_device(struct device
*device
, void *portptr
)
467 struct maple_device_specify
*ds
;
468 struct maple_device
*mdev
;
471 mdev
= to_maple_dev(device
);
472 if (mdev
->port
== ds
->port
&& mdev
->unit
== ds
->unit
)
477 static int setup_maple_commands(struct device
*device
, void *ignored
)
480 struct maple_device
*maple_dev
= to_maple_dev(device
);
482 if ((maple_dev
->interval
> 0)
483 && time_after(jiffies
, maple_dev
->when
)) {
484 /* bounce if we cannot lock */
485 add
= maple_add_packet(maple_dev
,
486 be32_to_cpu(maple_dev
->devinfo
.function
),
487 MAPLE_COMMAND_GETCOND
, 1, NULL
);
489 maple_dev
->when
= jiffies
+ maple_dev
->interval
;
491 if (time_after(jiffies
, maple_pnp_time
))
492 /* This will also bounce */
493 maple_add_packet(maple_dev
, 0,
494 MAPLE_COMMAND_DEVINFO
, 0, NULL
);
499 /* VBLANK bottom half - implemented via workqueue */
500 static void maple_vblank_handler(struct work_struct
*work
)
502 if (!list_empty(&maple_sentq
) || !maple_dma_done())
505 ctrl_outl(0, MAPLE_ENABLE
);
507 bus_for_each_dev(&maple_bus_type
, NULL
, NULL
,
508 setup_maple_commands
);
510 if (time_after(jiffies
, maple_pnp_time
))
511 maple_pnp_time
= jiffies
+ MAPLE_PNP_INTERVAL
;
513 mutex_lock(&maple_wlist_lock
);
514 if (!list_empty(&maple_waitq
) && list_empty(&maple_sentq
)) {
515 mutex_unlock(&maple_wlist_lock
);
518 mutex_unlock(&maple_wlist_lock
);
521 maplebus_dma_reset();
524 /* handle devices added via hotplugs - placing them on queue for DEVINFO*/
525 static void maple_map_subunits(struct maple_device
*mdev
, int submask
)
527 int retval
, k
, devcheck
;
528 struct maple_device
*mdev_add
;
529 struct maple_device_specify ds
;
531 ds
.port
= mdev
->port
;
532 for (k
= 0; k
< 5; k
++) {
535 bus_for_each_dev(&maple_bus_type
, NULL
, &ds
,
536 detach_maple_device
);
538 submask
= submask
>> 1;
541 devcheck
= submask
& 0x01;
543 mdev_add
= maple_alloc_dev(mdev
->port
, k
+ 1);
546 maple_add_packet(mdev_add
, 0, MAPLE_COMMAND_DEVINFO
,
548 /* mark that we are checking sub devices */
551 submask
= submask
>> 1;
555 /* mark a device as removed */
556 static void maple_clean_submap(struct maple_device
*mdev
)
560 killbit
= (mdev
->unit
> 0 ? (1 << (mdev
->unit
- 1)) & 0x1f : 0x20);
563 subdevice_map
[mdev
->port
] = subdevice_map
[mdev
->port
] & killbit
;
566 /* handle empty port or hotplug removal */
567 static void maple_response_none(struct maple_device
*mdev
,
570 if (mdev
->unit
!= 0) {
572 maple_clean_submap(mdev
);
574 "Maple bus device detaching at (%d, %d)\n",
575 mdev
->port
, mdev
->unit
);
576 maple_detach_driver(mdev
);
579 if (!started
|| !fullscan
) {
580 if (checked
[mdev
->port
] == false) {
581 checked
[mdev
->port
] = true;
582 printk(KERN_INFO
"No maple devices attached"
583 " to port %d\n", mdev
->port
);
587 maple_clean_submap(mdev
);
590 /* preprocess hotplugs or scans */
591 static void maple_response_devinfo(struct maple_device
*mdev
,
595 if (!started
|| (scanning
== 2) || !fullscan
) {
596 if ((mdev
->unit
== 0) && (checked
[mdev
->port
] == false)) {
597 checked
[mdev
->port
] = true;
598 maple_attach_driver(mdev
);
601 maple_attach_driver(mdev
);
605 if (mdev
->unit
== 0) {
606 submask
= recvbuf
[2] & 0x1F;
607 if (submask
^ subdevice_map
[mdev
->port
]) {
608 maple_map_subunits(mdev
, submask
);
609 subdevice_map
[mdev
->port
] = submask
;
614 static void maple_port_rescan(void)
617 struct maple_device
*mdev
;
620 for (i
= 0; i
< MAPLE_PORTS
; i
++) {
621 if (checked
[i
] == false) {
625 * test lock in case scan has failed
626 * but device is still locked
628 if (mutex_is_locked(&mdev
->mq
->mutex
))
629 mutex_unlock(&mdev
->mq
->mutex
);
630 maple_add_packet(mdev
, 0, MAPLE_COMMAND_DEVINFO
,
636 /* maple dma end bottom half - implemented via workqueue */
637 static void maple_dma_handler(struct work_struct
*work
)
639 struct mapleq
*mq
, *nmq
;
640 struct maple_device
*dev
;
642 enum maple_code code
;
644 if (!maple_dma_done())
646 ctrl_outl(0, MAPLE_ENABLE
);
647 if (!list_empty(&maple_sentq
)) {
648 list_for_each_entry_safe(mq
, nmq
, &maple_sentq
, list
) {
649 recvbuf
= mq
->recvbuf
;
653 mutex_unlock(&mq
->mutex
);
654 list_del_init(&mq
->list
);
657 case MAPLE_RESPONSE_NONE
:
658 maple_response_none(dev
, mq
);
661 case MAPLE_RESPONSE_DEVINFO
:
662 maple_response_devinfo(dev
, recvbuf
);
665 case MAPLE_RESPONSE_DATATRF
:
670 case MAPLE_RESPONSE_FILEERR
:
671 case MAPLE_RESPONSE_AGAIN
:
672 case MAPLE_RESPONSE_BADCMD
:
673 case MAPLE_RESPONSE_BADFUNC
:
675 "Maple non-fatal error 0x%X\n",
679 case MAPLE_RESPONSE_ALLINFO
:
681 "Maple - extended device information"
685 case MAPLE_RESPONSE_OK
:
692 /* if scanning is 1 then we have subdevices to check */
698 /*check if we have actually tested all ports yet */
701 /* mark that we have been through the first scan */
705 maplebus_dma_reset();
708 static irqreturn_t
maplebus_dma_interrupt(int irq
, void *dev_id
)
710 /* Load everything into the bottom half */
711 schedule_work(&maple_dma_process
);
715 static irqreturn_t
maplebus_vblank_interrupt(int irq
, void *dev_id
)
717 schedule_work(&maple_vblank_process
);
721 static int maple_set_dma_interrupt_handler(void)
723 return request_irq(HW_EVENT_MAPLE_DMA
, maplebus_dma_interrupt
,
724 IRQF_SHARED
, "maple bus DMA", &maple_dummy_driver
);
727 static int maple_set_vblank_interrupt_handler(void)
729 return request_irq(HW_EVENT_VSYNC
, maplebus_vblank_interrupt
,
730 IRQF_SHARED
, "maple bus VBLANK", &maple_dummy_driver
);
733 static int maple_get_dma_buffer(void)
736 (void *) __get_free_pages(GFP_KERNEL
| __GFP_ZERO
,
743 static int match_maple_bus_driver(struct device
*devptr
,
744 struct device_driver
*drvptr
)
746 struct maple_driver
*maple_drv
= to_maple_driver(drvptr
);
747 struct maple_device
*maple_dev
= to_maple_dev(devptr
);
749 /* Trap empty port case */
750 if (maple_dev
->devinfo
.function
== 0xFFFFFFFF)
752 else if (maple_dev
->devinfo
.function
&
753 cpu_to_be32(maple_drv
->function
))
/* no environment variables are added for maple uevents */
static int maple_bus_uevent(struct device *dev,
			    struct kobj_uevent_env *env)
{
	return 0;
}
/* the static maple_bus device is never freed, so nothing to do here */
static void maple_bus_release(struct device *dev)
{
}
768 static struct maple_driver maple_dummy_driver
= {
770 .name
= "maple_dummy_driver",
771 .bus
= &maple_bus_type
,
775 struct bus_type maple_bus_type
= {
777 .match
= match_maple_bus_driver
,
778 .uevent
= maple_bus_uevent
,
780 EXPORT_SYMBOL_GPL(maple_bus_type
);
782 static struct device maple_bus
= {
784 .release
= maple_bus_release
,
787 static int __init
maple_bus_init(void)
790 struct maple_device
*mdev
[MAPLE_PORTS
];
791 ctrl_outl(0, MAPLE_STATE
);
793 retval
= device_register(&maple_bus
);
797 retval
= bus_register(&maple_bus_type
);
801 retval
= driver_register(&maple_dummy_driver
.drv
);
805 /* allocate memory for maple bus dma */
806 retval
= maple_get_dma_buffer();
809 "Maple bus: Failed to allocate Maple DMA buffers\n");
813 /* set up DMA interrupt handler */
814 retval
= maple_set_dma_interrupt_handler();
817 "Maple bus: Failed to grab maple DMA IRQ\n");
821 /* set up VBLANK interrupt handler */
822 retval
= maple_set_vblank_interrupt_handler();
824 printk(KERN_INFO
"Maple bus: Failed to grab VBLANK IRQ\n");
829 kmem_cache_create("maple_queue_cache", 0x400, 0,
830 SLAB_HWCACHE_ALIGN
, NULL
);
832 if (!maple_queue_cache
)
833 goto cleanup_bothirqs
;
835 INIT_LIST_HEAD(&maple_waitq
);
836 INIT_LIST_HEAD(&maple_sentq
);
838 /* setup maple ports */
839 for (i
= 0; i
< MAPLE_PORTS
; i
++) {
841 mdev
[i
] = maple_alloc_dev(i
, 0);
842 baseunits
[i
] = mdev
[i
];
845 maple_free_dev(mdev
[i
]);
848 maple_add_packet(mdev
[i
], 0, MAPLE_COMMAND_DEVINFO
, 0, NULL
);
849 subdevice_map
[i
] = 0;
852 /* setup maplebus hardware */
853 maplebus_dma_reset();
854 /* initial detection */
856 maple_pnp_time
= jiffies
;
857 printk(KERN_INFO
"Maple bus core now registered.\n");
862 kmem_cache_destroy(maple_queue_cache
);
865 free_irq(HW_EVENT_VSYNC
, 0);
868 free_irq(HW_EVENT_MAPLE_DMA
, 0);
871 free_pages((unsigned long) maple_sendbuf
, MAPLE_DMA_PAGES
);
874 driver_unregister(&maple_dummy_driver
.drv
);
877 bus_unregister(&maple_bus_type
);
880 device_unregister(&maple_bus
);
883 printk(KERN_INFO
"Maple bus registration failed\n");
886 /* Push init to later to ensure hardware gets detected */
887 fs_initcall(maple_bus_init
);