2 * arch/powerpc/sysdev/ipic.c
4 * IPIC routines implementations.
6 * Copyright 2005 Freescale Semiconductor, Inc.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
13 #include <linux/kernel.h>
14 #include <linux/init.h>
15 #include <linux/errno.h>
16 #include <linux/reboot.h>
17 #include <linux/slab.h>
18 #include <linux/stddef.h>
19 #include <linux/sched.h>
20 #include <linux/signal.h>
21 #include <linux/sysdev.h>
22 #include <linux/device.h>
23 #include <linux/bootmem.h>
24 #include <linux/spinlock.h>
32 static struct ipic
* primary_ipic
;
33 static struct irq_chip ipic_level_irq_chip
, ipic_edge_irq_chip
;
34 static DEFINE_SPINLOCK(ipic_lock
);
/*
 * Per-source register routing table, indexed by hardware source number.
 * Each entry records which mask / priority / force (and, for
 * edge-capable sources, ack) register the source lives in; a .bit
 * field selects the bit position inside those registers.
 * NOTE(review): the array-entry index designators, the .pend/.bit
 * fields and the closing "};" were lost in extraction -- restore them
 * from upstream arch/powerpc/sysdev/ipic.c before building.
 */
36 static struct ipic_info ipic_info
[] = {
40 .force
= IPIC_SIFCR_H
,
47 .force
= IPIC_SIFCR_H
,
54 .force
= IPIC_SIFCR_H
,
61 .force
= IPIC_SIFCR_H
,
68 .force
= IPIC_SIFCR_H
,
75 .force
= IPIC_SIFCR_H
,
82 .force
= IPIC_SIFCR_H
,
89 .force
= IPIC_SIFCR_H
,
96 .force
= IPIC_SIFCR_H
,
101 .mask
= IPIC_SIMSR_H
,
102 .prio
= IPIC_SIPRR_D
,
103 .force
= IPIC_SIFCR_H
,
108 .mask
= IPIC_SIMSR_H
,
109 .prio
= IPIC_SIPRR_D
,
110 .force
= IPIC_SIFCR_H
,
117 .prio
= IPIC_SMPRR_A
,
125 .prio
= IPIC_SMPRR_A
,
133 .prio
= IPIC_SMPRR_A
,
141 .prio
= IPIC_SMPRR_B
,
149 .prio
= IPIC_SMPRR_B
,
157 .prio
= IPIC_SMPRR_B
,
165 .prio
= IPIC_SMPRR_B
,
171 .mask
= IPIC_SIMSR_H
,
172 .prio
= IPIC_SIPRR_A
,
173 .force
= IPIC_SIFCR_H
,
178 .mask
= IPIC_SIMSR_H
,
179 .prio
= IPIC_SIPRR_A
,
180 .force
= IPIC_SIFCR_H
,
185 .mask
= IPIC_SIMSR_H
,
186 .prio
= IPIC_SIPRR_A
,
187 .force
= IPIC_SIFCR_H
,
192 .mask
= IPIC_SIMSR_H
,
193 .prio
= IPIC_SIPRR_A
,
194 .force
= IPIC_SIFCR_H
,
199 .mask
= IPIC_SIMSR_H
,
200 .prio
= IPIC_SIPRR_A
,
201 .force
= IPIC_SIFCR_H
,
206 .mask
= IPIC_SIMSR_H
,
207 .prio
= IPIC_SIPRR_A
,
208 .force
= IPIC_SIFCR_H
,
213 .mask
= IPIC_SIMSR_H
,
214 .prio
= IPIC_SIPRR_A
,
215 .force
= IPIC_SIFCR_H
,
220 .mask
= IPIC_SIMSR_H
,
221 .prio
= IPIC_SIPRR_A
,
222 .force
= IPIC_SIFCR_H
,
227 .mask
= IPIC_SIMSR_H
,
228 .prio
= IPIC_SIPRR_B
,
229 .force
= IPIC_SIFCR_H
,
234 .mask
= IPIC_SIMSR_H
,
235 .prio
= IPIC_SIPRR_B
,
236 .force
= IPIC_SIFCR_H
,
241 .mask
= IPIC_SIMSR_H
,
242 .prio
= IPIC_SIPRR_B
,
243 .force
= IPIC_SIFCR_H
,
248 .mask
= IPIC_SIMSR_H
,
249 .prio
= IPIC_SIPRR_B
,
250 .force
= IPIC_SIFCR_H
,
255 .mask
= IPIC_SIMSR_H
,
256 .prio
= IPIC_SIPRR_B
,
257 .force
= IPIC_SIFCR_H
,
263 .prio
= IPIC_SMPRR_A
,
269 .mask
= IPIC_SIMSR_L
,
270 .prio
= IPIC_SMPRR_A
,
271 .force
= IPIC_SIFCR_L
,
276 .mask
= IPIC_SIMSR_L
,
277 .prio
= IPIC_SMPRR_A
,
278 .force
= IPIC_SIFCR_L
,
283 .mask
= IPIC_SIMSR_L
,
284 .prio
= IPIC_SMPRR_A
,
285 .force
= IPIC_SIFCR_L
,
290 .mask
= IPIC_SIMSR_L
,
291 .prio
= IPIC_SMPRR_A
,
292 .force
= IPIC_SIFCR_L
,
297 .mask
= IPIC_SIMSR_L
,
298 .prio
= IPIC_SMPRR_B
,
299 .force
= IPIC_SIFCR_L
,
304 .mask
= IPIC_SIMSR_L
,
305 .prio
= IPIC_SMPRR_B
,
306 .force
= IPIC_SIFCR_L
,
311 .mask
= IPIC_SIMSR_L
,
312 .prio
= IPIC_SMPRR_B
,
313 .force
= IPIC_SIFCR_L
,
318 .mask
= IPIC_SIMSR_L
,
319 .prio
= IPIC_SMPRR_B
,
320 .force
= IPIC_SIFCR_L
,
/* remaining internal sources have no programmable priority (.prio
 * absent => prio == 0, rejected by ipic_set_priority) */
325 .mask
= IPIC_SIMSR_L
,
327 .force
= IPIC_SIFCR_L
,
331 .mask
= IPIC_SIMSR_L
,
333 .force
= IPIC_SIFCR_L
,
337 .mask
= IPIC_SIMSR_L
,
339 .force
= IPIC_SIFCR_L
,
343 .mask
= IPIC_SIMSR_L
,
345 .force
= IPIC_SIFCR_L
,
349 .mask
= IPIC_SIMSR_L
,
351 .force
= IPIC_SIFCR_L
,
355 .mask
= IPIC_SIMSR_L
,
357 .force
= IPIC_SIFCR_L
,
361 .mask
= IPIC_SIMSR_L
,
363 .force
= IPIC_SIFCR_L
,
367 .mask
= IPIC_SIMSR_L
,
369 .force
= IPIC_SIFCR_L
,
373 .mask
= IPIC_SIMSR_L
,
375 .force
= IPIC_SIFCR_L
,
379 .mask
= IPIC_SIMSR_L
,
381 .force
= IPIC_SIFCR_L
,
385 .mask
= IPIC_SIMSR_L
,
387 .force
= IPIC_SIFCR_L
,
391 .mask
= IPIC_SIMSR_L
,
393 .force
= IPIC_SIFCR_L
,
397 .mask
= IPIC_SIMSR_L
,
399 .force
= IPIC_SIFCR_L
,
403 .mask
= IPIC_SIMSR_L
,
405 .force
= IPIC_SIFCR_L
,
409 .mask
= IPIC_SIMSR_L
,
411 .force
= IPIC_SIFCR_L
,
415 .mask
= IPIC_SIMSR_L
,
417 .force
= IPIC_SIFCR_L
,
421 .mask
= IPIC_SIMSR_L
,
423 .force
= IPIC_SIFCR_L
,
427 .mask
= IPIC_SIMSR_L
,
429 .force
= IPIC_SIFCR_L
,
433 .mask
= IPIC_SIMSR_L
,
435 .force
= IPIC_SIFCR_L
,
440 static inline u32
ipic_read(volatile u32 __iomem
*base
, unsigned int reg
)
442 return in_be32(base
+ (reg
>> 2));
445 static inline void ipic_write(volatile u32 __iomem
*base
, unsigned int reg
, u32 value
)
447 out_be32(base
+ (reg
>> 2), value
);
/*
 * Map a Linux virtual irq back to its owning IPIC instance.
 * NOTE(review): the function body was lost in extraction; presumably it
 * just returns primary_ipic (the only instance ever created) -- confirm
 * against upstream arch/powerpc/sysdev/ipic.c.
 */
450 static inline struct ipic
* ipic_from_irq(unsigned int virq
)
/* Translate a virq to its hardware source number via the irq_map table. */
455 #define ipic_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
457 static void ipic_unmask_irq(unsigned int virq
)
459 struct ipic
*ipic
= ipic_from_irq(virq
);
460 unsigned int src
= ipic_irq_to_hw(virq
);
464 spin_lock_irqsave(&ipic_lock
, flags
);
466 temp
= ipic_read(ipic
->regs
, ipic_info
[src
].mask
);
467 temp
|= (1 << (31 - ipic_info
[src
].bit
));
468 ipic_write(ipic
->regs
, ipic_info
[src
].mask
, temp
);
470 spin_unlock_irqrestore(&ipic_lock
, flags
);
473 static void ipic_mask_irq(unsigned int virq
)
475 struct ipic
*ipic
= ipic_from_irq(virq
);
476 unsigned int src
= ipic_irq_to_hw(virq
);
480 spin_lock_irqsave(&ipic_lock
, flags
);
482 temp
= ipic_read(ipic
->regs
, ipic_info
[src
].mask
);
483 temp
&= ~(1 << (31 - ipic_info
[src
].bit
));
484 ipic_write(ipic
->regs
, ipic_info
[src
].mask
, temp
);
486 /* mb() can't guarantee that masking is finished. But it does finish
487 * for nearly all cases. */
490 spin_unlock_irqrestore(&ipic_lock
, flags
);
493 static void ipic_ack_irq(unsigned int virq
)
495 struct ipic
*ipic
= ipic_from_irq(virq
);
496 unsigned int src
= ipic_irq_to_hw(virq
);
500 spin_lock_irqsave(&ipic_lock
, flags
);
502 temp
= ipic_read(ipic
->regs
, ipic_info
[src
].ack
);
503 temp
|= (1 << (31 - ipic_info
[src
].bit
));
504 ipic_write(ipic
->regs
, ipic_info
[src
].ack
, temp
);
506 /* mb() can't guarantee that ack is finished. But it does finish
507 * for nearly all cases. */
510 spin_unlock_irqrestore(&ipic_lock
, flags
);
513 static void ipic_mask_irq_and_ack(unsigned int virq
)
515 struct ipic
*ipic
= ipic_from_irq(virq
);
516 unsigned int src
= ipic_irq_to_hw(virq
);
520 spin_lock_irqsave(&ipic_lock
, flags
);
522 temp
= ipic_read(ipic
->regs
, ipic_info
[src
].mask
);
523 temp
&= ~(1 << (31 - ipic_info
[src
].bit
));
524 ipic_write(ipic
->regs
, ipic_info
[src
].mask
, temp
);
526 temp
= ipic_read(ipic
->regs
, ipic_info
[src
].ack
);
527 temp
|= (1 << (31 - ipic_info
[src
].bit
));
528 ipic_write(ipic
->regs
, ipic_info
[src
].ack
, temp
);
530 /* mb() can't guarantee that ack is finished. But it does finish
531 * for nearly all cases. */
534 spin_unlock_irqrestore(&ipic_lock
, flags
);
537 static int ipic_set_irq_type(unsigned int virq
, unsigned int flow_type
)
539 struct ipic
*ipic
= ipic_from_irq(virq
);
540 unsigned int src
= ipic_irq_to_hw(virq
);
541 struct irq_desc
*desc
= get_irq_desc(virq
);
542 unsigned int vold
, vnew
, edibit
;
544 if (flow_type
== IRQ_TYPE_NONE
)
545 flow_type
= IRQ_TYPE_LEVEL_LOW
;
547 /* ipic supports only low assertion and high-to-low change senses
549 if (!(flow_type
& (IRQ_TYPE_LEVEL_LOW
| IRQ_TYPE_EDGE_FALLING
))) {
550 printk(KERN_ERR
"ipic: sense type 0x%x not supported\n",
554 /* ipic supports only edge mode on external interrupts */
555 if ((flow_type
& IRQ_TYPE_EDGE_FALLING
) && !ipic_info
[src
].ack
) {
556 printk(KERN_ERR
"ipic: edge sense not supported on internal "
561 desc
->status
&= ~(IRQ_TYPE_SENSE_MASK
| IRQ_LEVEL
);
562 desc
->status
|= flow_type
& IRQ_TYPE_SENSE_MASK
;
563 if (flow_type
& IRQ_TYPE_LEVEL_LOW
) {
564 desc
->status
|= IRQ_LEVEL
;
565 desc
->handle_irq
= handle_level_irq
;
566 desc
->chip
= &ipic_level_irq_chip
;
568 desc
->handle_irq
= handle_edge_irq
;
569 desc
->chip
= &ipic_edge_irq_chip
;
572 /* only EXT IRQ senses are programmable on ipic
573 * internal IRQ senses are LEVEL_LOW
575 if (src
== IPIC_IRQ_EXT0
)
578 if (src
>= IPIC_IRQ_EXT1
&& src
<= IPIC_IRQ_EXT7
)
579 edibit
= (14 - (src
- IPIC_IRQ_EXT1
));
581 return (flow_type
& IRQ_TYPE_LEVEL_LOW
) ? 0 : -EINVAL
;
583 vold
= ipic_read(ipic
->regs
, IPIC_SECNR
);
584 if ((flow_type
& IRQ_TYPE_SENSE_MASK
) == IRQ_TYPE_EDGE_FALLING
) {
585 vnew
= vold
| (1 << edibit
);
587 vnew
= vold
& ~(1 << edibit
);
590 ipic_write(ipic
->regs
, IPIC_SECNR
, vnew
);
594 /* level interrupts and edge interrupts have different ack operations */
595 static struct irq_chip ipic_level_irq_chip
= {
596 .typename
= " IPIC ",
597 .unmask
= ipic_unmask_irq
,
598 .mask
= ipic_mask_irq
,
599 .mask_ack
= ipic_mask_irq
,
600 .set_type
= ipic_set_irq_type
,
603 static struct irq_chip ipic_edge_irq_chip
= {
604 .typename
= " IPIC ",
605 .unmask
= ipic_unmask_irq
,
606 .mask
= ipic_mask_irq
,
607 .mask_ack
= ipic_mask_irq_and_ack
,
609 .set_type
= ipic_set_irq_type
,
612 static int ipic_host_match(struct irq_host
*h
, struct device_node
*node
)
614 /* Exact match, unless ipic node is NULL */
615 return h
->of_node
== NULL
|| h
->of_node
== node
;
618 static int ipic_host_map(struct irq_host
*h
, unsigned int virq
,
621 struct ipic
*ipic
= h
->host_data
;
623 set_irq_chip_data(virq
, ipic
);
624 set_irq_chip_and_handler(virq
, &ipic_level_irq_chip
, handle_level_irq
);
626 /* Set default irq type */
627 set_irq_type(virq
, IRQ_TYPE_NONE
);
632 static int ipic_host_xlate(struct irq_host
*h
, struct device_node
*ct
,
633 u32
*intspec
, unsigned int intsize
,
634 irq_hw_number_t
*out_hwirq
, unsigned int *out_flags
)
637 /* interrupt sense values coming from the device tree equal either
638 * LEVEL_LOW (low assertion) or EDGE_FALLING (high-to-low change)
640 *out_hwirq
= intspec
[0];
642 *out_flags
= intspec
[1];
644 *out_flags
= IRQ_TYPE_NONE
;
648 static struct irq_host_ops ipic_host_ops
= {
649 .match
= ipic_host_match
,
650 .map
= ipic_host_map
,
651 .xlate
= ipic_host_xlate
,
/*
 * Probe/initialize an IPIC described by @node: allocate the descriptor
 * from bootmem, create a linear irq host, map the register block, then
 * program spread-mode, MCP routing and IRQ0->MCP routing from @flags.
 * NOTE(review): many lines were lost in extraction -- the local
 * declarations, the NULL/error return paths, the SICFR/SERCR bit
 * updates inside the flag tests, and the "primary_ipic = ipic"
 * assignment implied by the irq_set_default_host() call below.  Restore
 * from upstream arch/powerpc/sysdev/ipic.c before building.
 */
654 struct ipic
* __init
ipic_init(struct device_node
*node
, unsigned int flags
)
660 ipic
= alloc_bootmem(sizeof(struct ipic
));
664 memset(ipic
, 0, sizeof(struct ipic
));
666 ipic
->irqhost
= irq_alloc_host(of_node_get(node
), IRQ_HOST_MAP_LINEAR
,
669 if (ipic
->irqhost
== NULL
) {
674 ret
= of_address_to_resource(node
, 0, &res
);
680 ipic
->regs
= ioremap(res
.start
, res
.end
- res
.start
+ 1);
682 ipic
->irqhost
->host_data
= ipic
;
/* init hw: clear SICNR */
685 ipic_write(ipic
->regs
, IPIC_SICNR
, 0x0);
687 /* default priority scheme is grouped. If spread mode is required
688 * configure SICFR accordingly */
689 if (flags
& IPIC_SPREADMODE_GRP_A
)
691 if (flags
& IPIC_SPREADMODE_GRP_B
)
693 if (flags
& IPIC_SPREADMODE_GRP_C
)
695 if (flags
& IPIC_SPREADMODE_GRP_D
)
697 if (flags
& IPIC_SPREADMODE_MIX_A
)
699 if (flags
& IPIC_SPREADMODE_MIX_B
)
702 ipic_write(ipic
->regs
, IPIC_SICFR
, temp
);
704 /* handle MCP route */
706 if (flags
& IPIC_DISABLE_MCP_OUT
)
708 ipic_write(ipic
->regs
, IPIC_SERCR
, temp
);
710 /* handle routing of IRQ0 to MCP */
711 temp
= ipic_read(ipic
->regs
, IPIC_SEMSR
);
713 if (flags
& IPIC_IRQ0_MCP
)
716 temp
&= ~SEMSR_SIRQ0
;
718 ipic_write(ipic
->regs
, IPIC_SEMSR
, temp
);
721 irq_set_default_host(primary_ipic
->irqhost
);
723 printk ("IPIC (%d IRQ sources) at %p\n", NR_IPIC_INTS
,
729 int ipic_set_priority(unsigned int virq
, unsigned int priority
)
731 struct ipic
*ipic
= ipic_from_irq(virq
);
732 unsigned int src
= ipic_irq_to_hw(virq
);
739 if (ipic_info
[src
].prio
== 0)
742 temp
= ipic_read(ipic
->regs
, ipic_info
[src
].prio
);
745 temp
&= ~(0x7 << (20 + (3 - priority
) * 3));
746 temp
|= ipic_info
[src
].prio_mask
<< (20 + (3 - priority
) * 3);
748 temp
&= ~(0x7 << (4 + (7 - priority
) * 3));
749 temp
|= ipic_info
[src
].prio_mask
<< (4 + (7 - priority
) * 3);
752 ipic_write(ipic
->regs
, ipic_info
[src
].prio
, temp
);
/*
 * Make @virq the highest-priority source by writing its hardware source
 * number into the HPI field (bits 24..30) of SICFR.
 * NOTE(review): the statement that clears the old HPI field (between
 * orig lines 765 and 767) was lost in extraction -- restore it from
 * upstream before building, otherwise stale HPI bits are OR-ed in.
 */
757 void ipic_set_highest_priority(unsigned int virq
)
759 struct ipic
*ipic
= ipic_from_irq(virq
);
760 unsigned int src
= ipic_irq_to_hw(virq
);
763 temp
= ipic_read(ipic
->regs
, IPIC_SICFR
);
765 /* clear and set HPI */
767 temp
|= (src
& 0x7f) << 24;
769 ipic_write(ipic
->regs
, IPIC_SICFR
, temp
);
772 void ipic_set_default_priority(void)
774 ipic_write(primary_ipic
->regs
, IPIC_SIPRR_A
, IPIC_PRIORITY_DEFAULT
);
775 ipic_write(primary_ipic
->regs
, IPIC_SIPRR_B
, IPIC_PRIORITY_DEFAULT
);
776 ipic_write(primary_ipic
->regs
, IPIC_SIPRR_C
, IPIC_PRIORITY_DEFAULT
);
777 ipic_write(primary_ipic
->regs
, IPIC_SIPRR_D
, IPIC_PRIORITY_DEFAULT
);
778 ipic_write(primary_ipic
->regs
, IPIC_SMPRR_A
, IPIC_PRIORITY_DEFAULT
);
779 ipic_write(primary_ipic
->regs
, IPIC_SMPRR_B
, IPIC_PRIORITY_DEFAULT
);
782 void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq
)
784 struct ipic
*ipic
= primary_ipic
;
787 temp
= ipic_read(ipic
->regs
, IPIC_SERMR
);
788 temp
|= (1 << (31 - mcp_irq
));
789 ipic_write(ipic
->regs
, IPIC_SERMR
, temp
);
792 void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq
)
794 struct ipic
*ipic
= primary_ipic
;
797 temp
= ipic_read(ipic
->regs
, IPIC_SERMR
);
798 temp
&= (1 << (31 - mcp_irq
));
799 ipic_write(ipic
->regs
, IPIC_SERMR
, temp
);
802 u32
ipic_get_mcp_status(void)
804 return ipic_read(primary_ipic
->regs
, IPIC_SERMR
);
807 void ipic_clear_mcp_status(u32 mask
)
809 ipic_write(primary_ipic
->regs
, IPIC_SERMR
, mask
);
812 /* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
813 unsigned int ipic_get_irq(void)
817 BUG_ON(primary_ipic
== NULL
);
819 #define IPIC_SIVCR_VECTOR_MASK 0x7f
820 irq
= ipic_read(primary_ipic
->regs
, IPIC_SIVCR
) & IPIC_SIVCR_VECTOR_MASK
;
822 if (irq
== 0) /* 0 --> no irq is pending */
825 return irq_linear_revmap(primary_ipic
->irqhost
, irq
);
/* sysfs class node for the IPIC; set_kset_name() is the pre-2.6.25
 * idiom for naming a sysdev class. */
828 static struct sysdev_class ipic_sysclass
= {
829 set_kset_name("ipic"),
/* NOTE(review): the closing "};" of both initializers and the device's
 * ".id = 0" line (orig line 833) were lost in extraction -- restore
 * from upstream before building. */
832 static struct sys_device device_ipic
= {
834 .cls
= &ipic_sysclass
,
837 static int __init
init_ipic_sysfs(void)
841 if (!primary_ipic
->regs
)
843 printk(KERN_DEBUG
"Registering ipic with sysfs...\n");
845 rc
= sysdev_class_register(&ipic_sysclass
);
847 printk(KERN_ERR
"Failed registering ipic sys class\n");
850 rc
= sysdev_register(&device_ipic
);
852 printk(KERN_ERR
"Failed registering ipic sys device\n");
858 subsys_initcall(init_ipic_sysfs
);