/*
 * Copyright (C) 2001 Mike Corrigan IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/module.h>

#include <asm/system.h>

#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/hv_call_event.h>
#include <asm/iseries/it_lp_naca.h>

/*
 * The LpQueue is used to pass event data from the hypervisor to
 * the partition.  This is where I/O interrupt events are communicated.
 *
 * It is written to by the hypervisor so cannot end up in the BSS.
 */
struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));
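
/*
 * Per-CPU counts of events seen for each LpEvent type; these feed the
 * /proc/iSeries/lpevents report below.
 */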
DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);
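
/* Human-readable names for each LpEvent type, used by the /proc report. */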
static char *event_types[HvLpEvent_Type_NumTypes] = {
	"Hypervisor",
	"Machine Facilities",
	"Session Manager",
	"SPD I/O",
	"Virtual Bus",
	"PCI I/O",
	"RIO I/O",
	"Virtual Lan",
	"Virtual I/O"
};

/* Array of LpEvent handler functions */
static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];

static struct HvLpEvent *get_next_hvlpevent(void)
{
	struct HvLpEvent *event;

	event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;

	if (hvlpevent_is_valid(event)) {
		/* rmb() needed only for weakly consistent machines (regatta) */
		rmb();

		/* Set pointer to next potential event */
		hvlpevent_queue.hq_current_event += ((event->xSizeMinus1 +
				IT_LP_EVENT_ALIGN) / IT_LP_EVENT_ALIGN) *
				IT_LP_EVENT_ALIGN;

		/* Wrap to beginning if no room at end */
		if (hvlpevent_queue.hq_current_event >
				hvlpevent_queue.hq_last_event) {
			hvlpevent_queue.hq_current_event =
				hvlpevent_queue.hq_event_stack;
		}
	} else
		event = NULL;

	return event;
}
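
/*
 * Events are processed only on the first spread_lpevents processors
 * (see hvlpevent_is_pending below); settable at boot via the
 * spread_lpevents= parameter.
 */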
static unsigned long spread_lpevents = NR_CPUS;

int hvlpevent_is_pending(void)
{
	struct HvLpEvent *next_event;

	if (smp_processor_id() >= spread_lpevents)
		return 0;

	next_event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;

	return hvlpevent_is_valid(next_event) ||
		hvlpevent_queue.hq_overflow_pending;
}

static void hvlpevent_clear_valid(struct HvLpEvent *event)
{
	/* Tell the Hypervisor that we're done with this event.
	 * Also clear bits within this event that might look like valid bits,
	 * ie. on 64-byte boundaries.
	 */
	struct HvLpEvent *tmp;
	unsigned extra = ((event->xSizeMinus1 + IT_LP_EVENT_ALIGN) /
					IT_LP_EVENT_ALIGN) - 1;

	/* Invalidate the trailing 64-byte blocks first; the cases
	 * deliberately fall through. */
	switch (extra) {
	case 3:
		tmp = (struct HvLpEvent *)((char *)event + 3 * IT_LP_EVENT_ALIGN);
		hvlpevent_invalidate(tmp);
	case 2:
		tmp = (struct HvLpEvent *)((char *)event + 2 * IT_LP_EVENT_ALIGN);
		hvlpevent_invalidate(tmp);
	case 1:
		tmp = (struct HvLpEvent *)((char *)event + 1 * IT_LP_EVENT_ALIGN);
		hvlpevent_invalidate(tmp);
	}

	mb();

	hvlpevent_invalidate(event);
}
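
/*
 * Drain the LpEvent queue: dispatch each valid event to its registered
 * handler, then pull in any overflow events the hypervisor is holding.
 */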
void process_hvlpevents(struct pt_regs *regs)
{
	struct HvLpEvent *event;

	/* If we have recursed, just return */
	if (!spin_trylock(&hvlpevent_queue.hq_lock))
		return;

	for (;;) {
		event = get_next_hvlpevent();
		if (event) {
			/* Call appropriate handler here, passing
			 * a pointer to the LpEvent.  The handler
			 * must make a copy of the LpEvent if it
			 * needs it in a bottom half (perhaps for
			 * an ACK).
			 *
			 * Handlers are responsible for ACK processing.
			 *
			 * The Hypervisor guarantees that LpEvents will
			 * only be delivered with types that we have
			 * registered for, so no type check is necessary
			 * here.
			 */
			if (event->xType < HvLpEvent_Type_NumTypes)
				__get_cpu_var(hvlpevent_counts)[event->xType]++;
			if (event->xType < HvLpEvent_Type_NumTypes &&
					lpEventHandler[event->xType])
				lpEventHandler[event->xType](event, regs);
			else
				printk(KERN_INFO "Unexpected Lp Event type=%d\n",
						event->xType);

			hvlpevent_clear_valid(event);
		} else if (hvlpevent_queue.hq_overflow_pending)
			/*
			 * No more valid events.  If overflow events are
			 * pending, process them.
			 */
			HvCallEvent_getOverflowLpEvents(hvlpevent_queue.hq_index);
		else
			break;
	}

	spin_unlock(&hvlpevent_queue.hq_lock);
}
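
/*
 * Handle the spread_lpevents= kernel boot parameter: limit lpevent
 * processing to the first N processors.
 */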
static int set_spread_lpevents(char *str)
{
	unsigned long val = simple_strtoul(str, NULL, 0);

	/*
	 * The parameter is the number of processors to share in processing
	 * lp events.
	 */
	if ((val > 0) && (val <= NR_CPUS)) {
		spread_lpevents = val;
		printk("lpevent processing spread over %ld processors\n", val);
	} else {
		printk("invalid spread_lpevents %ld\n", val);
	}

	return 1;
}
__setup("spread_lpevents=", set_spread_lpevents);
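
/*
 * Called once at boot: allocate the event stack and tell the hypervisor
 * where it is.
 */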
void setup_hvlpevent_queue(void)
{
	void *eventStack;

	spin_lock_init(&hvlpevent_queue.hq_lock);

	/* Allocate a page for the Event Stack. */
	eventStack = alloc_bootmem_pages(IT_LP_EVENT_STACK_SIZE);
	memset(eventStack, 0, IT_LP_EVENT_STACK_SIZE);

	/* Invoke the hypervisor to initialize the event stack */
	HvCallEvent_setLpEventStack(0, eventStack, IT_LP_EVENT_STACK_SIZE);

	hvlpevent_queue.hq_event_stack = eventStack;
	hvlpevent_queue.hq_current_event = eventStack;
	hvlpevent_queue.hq_last_event = (char *)eventStack +
		(IT_LP_EVENT_STACK_SIZE - IT_LP_EVENT_MAX_SIZE);
	hvlpevent_queue.hq_index = 0;
}

/* Register a handler for an LpEvent type */
int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler)
{
	if (eventType < HvLpEvent_Type_NumTypes) {
		lpEventHandler[eventType] = handler;
		return 0;
	}
	return 1;
}
EXPORT_SYMBOL(HvLpEvent_registerHandler);
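
/*
 * Typical usage, as a sketch (the handler name is illustrative; the
 * VirtualIo type comes from hv_lp_event.h):
 *
 *	static void my_handler(struct HvLpEvent *event, struct pt_regs *regs)
 *	{
 *		... handle the event, copying it if a bottom half needs it ...
 *	}
 *
 *	HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo, my_handler);
 *	HvLpEvent_openPath(HvLpEvent_Type_VirtualIo, 0);
 */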

int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
{
	might_sleep();

	if (eventType < HvLpEvent_Type_NumTypes) {
		if (!lpEventHandlerPaths[eventType]) {
			lpEventHandler[eventType] = NULL;
			/*
			 * We now sleep until all other CPUs have scheduled.
			 * This ensures that the deletion is seen by all
			 * other CPUs, and that the deleted handler isn't
			 * still running on another CPU when we return.
			 */
			synchronize_rcu();
			return 0;
		}
	}
	return 1;
}
EXPORT_SYMBOL(HvLpEvent_unregisterHandler);

/*
 * lpIndex is the partition index of the target partition.  It is needed
 * only for VirtualIo, VirtualLan and SessionMgr.  Zero indicates that
 * our own partition index should be used, as it is for the other types.
 */
int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
{
	if ((eventType < HvLpEvent_Type_NumTypes) &&
			lpEventHandler[eventType]) {
		if (lpIndex == 0)
			lpIndex = itLpNaca.xLpIndex;
		HvCallEvent_openLpEventPath(lpIndex, eventType);
		++lpEventHandlerPaths[eventType];
		return 0;
	}
	return 1;
}
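
/*
 * Close a path previously opened with HvLpEvent_openPath().  A non-zero
 * open-path count keeps the handler pinned (see
 * HvLpEvent_unregisterHandler above).
 */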
int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
{
	if ((eventType < HvLpEvent_Type_NumTypes) &&
			lpEventHandler[eventType] &&
			lpEventHandlerPaths[eventType]) {
		if (lpIndex == 0)
			lpIndex = itLpNaca.xLpIndex;
		HvCallEvent_closeLpEventPath(lpIndex, eventType);
		--lpEventHandlerPaths[eventType];
		return 0;
	}
	return 1;
}
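
/*
 * /proc/iSeries/lpevents: report total events processed, plus per-type
 * and per-processor counts.
 */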
static int proc_lpevents_show(struct seq_file *m, void *v)
{
	int cpu, i;
	unsigned long sum;
	static unsigned long cpu_totals[NR_CPUS];

	/* FIXME: do we care that there's no locking here? */
	sum = 0;
	for_each_online_cpu(cpu) {
		cpu_totals[cpu] = 0;
		for (i = 0; i < HvLpEvent_Type_NumTypes; i++)
			cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
		sum += cpu_totals[cpu];
	}

	seq_printf(m, "LpEventQueue 0\n");
	seq_printf(m, "  events processed:\t%lu\n", sum);

	for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
		sum = 0;
		for_each_online_cpu(cpu)
			sum += per_cpu(hvlpevent_counts, cpu)[i];

		seq_printf(m, "    %-20s %10lu\n", event_types[i], sum);
	}

	seq_printf(m, "\n  events processed by processor:\n");

	for_each_online_cpu(cpu)
		seq_printf(m, "    CPU%02d  %10lu\n", cpu, cpu_totals[cpu]);

	return 0;
}

static int proc_lpevents_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_lpevents_show, NULL);
}

static struct file_operations proc_lpevents_operations = {
	.open		= proc_lpevents_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
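
/* Create /proc/iSeries/lpevents at boot. */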
static int __init proc_lpevents_init(void)
{
	struct proc_dir_entry *e;

	e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
	if (e)
		e->proc_fops = &proc_lpevents_operations;

	return 0;
}
__initcall(proc_lpevents_init);