/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) support - standard version.
 *
 *	XPC provides a message passing capability that crosses partition
 *	boundaries. This module is made up of two parts:
 *
 *	    partition	This part detects the presence/absence of other
 *			partitions. It provides a heartbeat and monitors
 *			the heartbeats of other partitions.
 *
 *	    channel	This part manages the channels and sends/receives
 *			messages across them to/from other partitions.
 *
 *	There are a couple of additional functions residing in XP, which
 *	provide an interface to XPC for its users.
 *
 *	Caveats:
 *
 *	  . We currently have no way to determine which nasid an IPI came
 *	    from. Thus, xpc_IPI_send() does a remote AMO write followed by
 *	    an IPI. The AMO indicates where data is to be pulled from, so
 *	    after the IPI arrives, the remote partition checks the AMO word.
 *	    The IPI can actually arrive before the AMO however, so other code
 *	    must periodically check for this case. Also, remote AMO operations
 *	    do not reliably time out. Thus we do a remote PIO read solely to
 *	    know whether the remote partition is down and whether we should
 *	    stop sending IPIs to it. This remote PIO read operation is set up
 *	    in a special nofault region so SAL knows to ignore (and cleanup)
 *	    any errors due to the remote AMO write, PIO read, and/or PIO
 *	    write operations.
 *
 *	    If/when new hardware solves this IPI problem, we should abandon
 *	    the current approach.
 */
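
/*
 * In rough outline (a sketch only; the helpers involved live elsewhere in
 * XP/XPC and take more arguments than shown here), the caveat above means
 * the notification path looks like:
 *
 *	sender:				receiver:
 *	  remote AMO write		  IPI arrives (possibly before the
 *	  IPI		------------->	   AMO write has landed!)
 *					  check AMO word
 *					  pull data indicated by the AMO
 *
 * Because the IPI can outrun its AMO write, xpc_dropped_IPI_check() below
 * periodically rescans for AMOs whose IPI was effectively dropped.
 */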

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/completion.h>
#include <linux/kdebug.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#include "xpc.h"

/* define two XPC debug device structures to be used with dev_dbg() et al */

struct device_driver xpc_dbg_name = {
	.name = "xpc"
};

struct device xpc_part_dbg_subname = {
	.bus_id = {0},		/* set to "part" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device xpc_chan_dbg_subname = {
	.bus_id = {0},		/* set to "chan" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device *xpc_part = &xpc_part_dbg_subname;
struct device *xpc_chan = &xpc_chan_dbg_subname;

static int xpc_kdebug_ignore;

/* systune related variables for /proc/sys directories */

static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
static int xpc_hb_min_interval = 1;
static int xpc_hb_max_interval = 10;

static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
static int xpc_hb_check_min_interval = 10;
static int xpc_hb_check_max_interval = 120;

int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT;
static int xpc_disengage_request_min_timelimit;	/* = 0 */
static int xpc_disengage_request_max_timelimit = 120;

static ctl_table xpc_sys_xpc_hb_dir[] = {
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "hb_interval",
	 .data = &xpc_hb_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec_minmax,
	 .strategy = &sysctl_intvec,
	 .extra1 = &xpc_hb_min_interval,
	 .extra2 = &xpc_hb_max_interval},
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "hb_check_interval",
	 .data = &xpc_hb_check_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec_minmax,
	 .strategy = &sysctl_intvec,
	 .extra1 = &xpc_hb_check_min_interval,
	 .extra2 = &xpc_hb_check_max_interval},
	{}
};
static ctl_table xpc_sys_xpc_dir[] = {
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "hb",
	 .mode = 0555,
	 .child = xpc_sys_xpc_hb_dir},
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "disengage_request_timelimit",
	 .data = &xpc_disengage_request_timelimit,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec_minmax,
	 .strategy = &sysctl_intvec,
	 .extra1 = &xpc_disengage_request_min_timelimit,
	 .extra2 = &xpc_disengage_request_max_timelimit},
	{}
};
static ctl_table xpc_sys_dir[] = {
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "xpc",
	 .mode = 0555,
	 .child = xpc_sys_xpc_dir},
	{}
};
static struct ctl_table_header *xpc_sysctl;
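
/*
 * Assuming the directory procnames above ("xpc" and "hb"), the tables
 * register the following sysctl hierarchy, each entry clamped to its
 * [extra1, extra2] range by proc_dointvec_minmax():
 *
 *	/proc/sys/xpc/hb/hb_interval
 *	/proc/sys/xpc/hb/hb_check_interval
 *	/proc/sys/xpc/disengage_request_timelimit
 */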

/* non-zero if any remote partition disengage request was timed out */
int xpc_disengage_request_timedout;

/* #of IRQs received */
static atomic_t xpc_act_IRQ_rcvd;

/* IRQ handler notifies this wait queue on receipt of an IRQ */
static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);

static unsigned long xpc_hb_check_timeout;

/* notification that the xpc_hb_checker thread has exited */
static DECLARE_COMPLETION(xpc_hb_checker_exited);

/* notification that the xpc_discovery thread has exited */
static DECLARE_COMPLETION(xpc_discovery_exited);

static struct timer_list xpc_hb_timer;

static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);

static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_reboot_notifier = {
	.notifier_call = xpc_system_reboot,
};

static int xpc_system_die(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_die_notifier = {
	.notifier_call = xpc_system_die,
};

enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *rp);

/*
 * Timer function to enforce the timelimit on the partition disengage request.
 */
static void
xpc_timeout_partition_disengage_request(unsigned long data)
{
	struct xpc_partition *part = (struct xpc_partition *)data;

	DBUG_ON(time_before(jiffies, part->disengage_request_timeout));

	(void)xpc_partition_disengaged(part);

	DBUG_ON(part->disengage_request_timeout != 0);
	DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0);
}

/*
 * Notify the heartbeat check thread that an IRQ has been received.
 */
static irqreturn_t
xpc_act_IRQ_handler(int irq, void *dev_id)
{
	atomic_inc(&xpc_act_IRQ_rcvd);
	wake_up_interruptible(&xpc_act_IRQ_wq);
	return IRQ_HANDLED;
}

/*
 * Timer to produce the heartbeat. The timer structure's function pointer
 * is already set when this is initially called. A tunable is used to
 * specify when the next timeout should occur.
 */
static void
xpc_hb_beater(unsigned long dummy)
{
	xpc_vars->heartbeat++;

	if (time_after_eq(jiffies, xpc_hb_check_timeout))
		wake_up_interruptible(&xpc_act_IRQ_wq);

	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
	add_timer(&xpc_hb_timer);
}
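
/*
 * Note that xpc_hb_beater() rearms xpc_hb_timer itself, so once the first
 * beat occurs the heartbeat keeps incrementing every xpc_hb_interval
 * seconds until del_timer_sync() is called (by xpc_do_exit() or by the
 * xpc_init() failure path).
 */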

/*
 * This thread is responsible for nearly all of the partition
 * activation/deactivation.
 */
static int
xpc_hb_checker(void *ignore)
{
	int last_IRQ_count = 0;
	int new_IRQ_count;
	int force_IRQ = 0;

	/* this thread was marked active by xpc_hb_init() */

	set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU));

	/* set our heartbeating to other partitions into motion */
	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
	xpc_hb_beater(0);

	while (!xpc_exiting) {

		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
			"been received\n",
			(int)(xpc_hb_check_timeout - jiffies),
			atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count);

		/* checking of remote heartbeats is skewed by IRQ handling */
		if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
			dev_dbg(xpc_part, "checking remote heartbeats\n");
			xpc_check_remote_hb();

			/*
			 * We need to periodically recheck to ensure no
			 * IPI/AMO pairs have been missed. That check
			 * must always reset xpc_hb_check_timeout.
			 */
			force_IRQ = 1;
		}

		/* check for outstanding IRQs */
		new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
		if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
			force_IRQ = 0;

			dev_dbg(xpc_part, "found an IRQ to process; will be "
				"resetting xpc_hb_check_timeout\n");

			last_IRQ_count += xpc_identify_act_IRQ_sender();
			if (last_IRQ_count < new_IRQ_count) {
				/* retry once to help avoid missing AMO */
				(void)xpc_identify_act_IRQ_sender();
			}
			last_IRQ_count = new_IRQ_count;

			xpc_hb_check_timeout = jiffies +
			    (xpc_hb_check_interval * HZ);
		}

		/* wait for IRQ or timeout */
		(void)wait_event_interruptible(xpc_act_IRQ_wq,
					       (last_IRQ_count <
						atomic_read(&xpc_act_IRQ_rcvd)
						|| time_after_eq(jiffies,
						xpc_hb_check_timeout) ||
						xpc_exiting));
	}

	dev_dbg(xpc_part, "heartbeat checker is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_hb_checker_exited);
	return 0;
}
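
/*
 * The wait_event_interruptible() at the bottom of xpc_hb_checker()'s loop
 * can return for any of three reasons: an activate IRQ bumped
 * xpc_act_IRQ_rcvd, the heartbeat check interval expired, or xpc_exiting
 * was set by xpc_do_exit(); the top of the loop sorts out which it was.
 */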

/*
 * This thread will attempt to discover other partitions to activate
 * based on info provided by SAL. This new thread is short lived and
 * will exit once discovery is complete.
 */
static int
xpc_initiate_discovery(void *ignore)
{
	xpc_discovery();

	dev_dbg(xpc_part, "discovery thread is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_discovery_exited);
	return 0;
}

/*
 * Establish first contact with the remote partition. This involves pulling
 * the XPC per partition variables from the remote partition and waiting for
 * the remote partition to pull ours.
 */
static enum xp_retval
xpc_make_first_contact(struct xpc_partition *part)
{
	enum xp_retval ret;

	while ((ret = xpc_pull_remote_vars_part(part)) != xpSuccess) {
		if (ret != xpRetry) {
			XPC_DEACTIVATE_PARTITION(part, ret);
			return ret;
		}

		dev_dbg(xpc_chan, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_DEACTIVATING)
			return part->reason;
	}

	return xpc_mark_partition_active(part);
}

/*
 * The first kthread assigned to a newly activated partition is the one
 * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to
 * that kthread until the partition is brought down, at which time that kthread
 * returns back to XPC HB. (The return of that kthread will signify to XPC HB
 * that XPC has dismantled all communication infrastructure for the associated
 * partition.) This kthread becomes the channel manager for that partition.
 *
 * Each active partition has a channel manager, who, besides connecting and
 * disconnecting channels, will ensure that each of the partition's connected
 * channels has the required number of assigned kthreads to get the work done.
 */
static void
xpc_channel_mgr(struct xpc_partition *part)
{
	while (part->act_state != XPC_P_DEACTIVATING ||
	       atomic_read(&part->nchannels_active) > 0 ||
	       !xpc_partition_disengaged(part)) {

		xpc_process_channel_activity(part);

		/*
		 * Wait until we've been requested to activate kthreads or
		 * all of the channel's message queues have been torn down or
		 * a signal is pending.
		 *
		 * The channel_mgr_requests is set to 1 after being awakened.
		 * This is done to prevent the channel mgr from making one pass
		 * through the loop for each request, since it will
		 * be servicing all the requests in one pass. The reason it's
		 * set to 1 instead of 0 is so that other kthreads will know
		 * that the channel mgr is running and won't bother trying to
		 * wake it up.
		 */
		atomic_dec(&part->channel_mgr_requests);
		(void)wait_event_interruptible(part->channel_mgr_wq,
			       (atomic_read(&part->channel_mgr_requests) > 0 ||
				part->local_IPI_amo != 0 ||
				(part->act_state == XPC_P_DEACTIVATING &&
				 atomic_read(&part->nchannels_active) == 0 &&
				 xpc_partition_disengaged(part))));
		atomic_set(&part->channel_mgr_requests, 1);
	}
}

/*
 * When XPC HB determines that a partition has come up, it will create a new
 * kthread and that kthread will call this function to attempt to set up the
 * basic infrastructure used for Cross Partition Communication with the newly
 * upped partition.
 *
 * The kthread that was created by XPC HB and which set up the XPC
 * infrastructure will remain assigned to the partition until the partition
 * goes down, at which time the kthread will tear down the XPC infrastructure
 * and then exit.
 *
 * XPC HB will put the remote partition's XPC per partition specific variables
 * physical address into xpc_partitions[partid].remote_vars_part_pa prior to
 * calling xpc_partition_up().
 */
static void
xpc_partition_up(struct xpc_partition *part)
{
	DBUG_ON(part->channels != NULL);

	dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part));

	if (xpc_setup_infrastructure(part) != xpSuccess)
		return;

	/*
	 * The kthread that XPC HB called us with will become the
	 * channel manager for this partition. It will not return
	 * back to XPC HB until the partition's XPC infrastructure
	 * has been dismantled.
	 */

	(void)xpc_part_ref(part);	/* this will always succeed */

	if (xpc_make_first_contact(part) == xpSuccess)
		xpc_channel_mgr(part);

	xpc_part_deref(part);

	xpc_teardown_infrastructure(part);
}

static int
xpc_activating(void *__partid)
{
	short partid = (u64)__partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	unsigned long irq_flags;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

	spin_lock_irqsave(&part->act_lock, irq_flags);

	if (part->act_state == XPC_P_DEACTIVATING) {
		part->act_state = XPC_P_INACTIVE;
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		part->remote_rp_pa = 0;
		return 0;
	}

	/* indicate the thread is activating */
	DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ);
	part->act_state = XPC_P_ACTIVATING;

	XPC_SET_REASON(part, 0, 0);
	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	dev_dbg(xpc_part, "bringing partition %d up\n", partid);

	/*
	 * Register the remote partition's AMOs with SAL so it can handle
	 * and cleanup errors within that address range should the remote
	 * partition go down. We don't unregister this range because it is
	 * difficult to tell when outstanding writes to the remote partition
	 * are finished and thus when it is safe to unregister. This should
	 * not result in wasted space in the SAL xp_addr_region table because
	 * we should get the same page for remote_amos_page_pa after module
	 * reloads and system reboots.
	 */
	if (sn_register_xp_addr_region(part->remote_amos_page_pa,
				       PAGE_SIZE, 1) < 0) {
		dev_warn(xpc_part, "xpc_partition_up(%d) failed to register "
			 "xp_addr region\n", partid);

		spin_lock_irqsave(&part->act_lock, irq_flags);
		part->act_state = XPC_P_INACTIVE;
		XPC_SET_REASON(part, xpPhysAddrRegFailed, __LINE__);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		part->remote_rp_pa = 0;
		return 0;
	}

	xpc_allow_hb(partid, xpc_vars);
	xpc_IPI_send_activated(part);

	/*
	 * xpc_partition_up() holds this thread and marks this partition as
	 * XPC_P_ACTIVE by calling xpc_hb_mark_active().
	 */
	(void)xpc_partition_up(part);

	xpc_disallow_hb(partid, xpc_vars);
	xpc_mark_partition_inactive(part);

	if (part->reason == xpReactivating) {
		/* interrupting ourselves results in activating partition */
		xpc_IPI_send_reactivate(part);
	}

	return 0;
}

void
xpc_activate_partition(struct xpc_partition *part)
{
	short partid = XPC_PARTID(part);
	unsigned long irq_flags;
	struct task_struct *kthread;

	spin_lock_irqsave(&part->act_lock, irq_flags);

	DBUG_ON(part->act_state != XPC_P_INACTIVE);

	part->act_state = XPC_P_ACTIVATION_REQ;
	XPC_SET_REASON(part, xpCloneKThread, __LINE__);

	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
			      partid);
	if (IS_ERR(kthread)) {
		spin_lock_irqsave(&part->act_lock, irq_flags);
		part->act_state = XPC_P_INACTIVE;
		XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
	}
}

/*
 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
 * than one partition, we use an AMO_t structure per partition to indicate
 * whether a partition has sent an IPI or not. If it has, then wake up the
 * associated kthread to handle it.
 *
 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
 * running on other partitions.
 *
 * Noteworthy Arguments:
 *
 *	irq - Interrupt ReQuest number. NOT USED.
 *
 *	dev_id - partid of IPI's potential sender.
 */
irqreturn_t
xpc_notify_IRQ_handler(int irq, void *dev_id)
{
	short partid = (short)(u64)dev_id;
	struct xpc_partition *part = &xpc_partitions[partid];

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

	if (xpc_part_ref(part)) {
		xpc_check_for_channel_activity(part);

		xpc_part_deref(part);
	}
	return IRQ_HANDLED;
}
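
/*
 * Since dev_id carries the partid of the IPI's potential sender, one
 * shared SGI_XPC_NOTIFY vector can serve all remote partitions: the
 * handler is registered (elsewhere in XPC) once per remote partition with
 * dev_id set to that partition's partid, and the per-partition AMO check
 * determines whether that partition actually sent anything.
 */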

/*
 * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
 * because the write to their associated IPI amo completed after the IRQ/IPI
 * was received.
 */
void
xpc_dropped_IPI_check(struct xpc_partition *part)
{
	if (xpc_part_ref(part)) {
		xpc_check_for_channel_activity(part);

		part->dropped_IPI_timer.expires = jiffies +
		    XPC_P_DROPPED_IPI_WAIT;
		add_timer(&part->dropped_IPI_timer);
		xpc_part_deref(part);
	}
}

void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
	int idle = atomic_read(&ch->kthreads_idle);
	int assigned = atomic_read(&ch->kthreads_assigned);
	int wakeup;

	DBUG_ON(needed <= 0);

	if (idle > 0) {
		wakeup = (needed > idle) ? idle : needed;
		needed -= wakeup;

		dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
			"channel=%d\n", wakeup, ch->partid, ch->number);

		/* only wakeup the requested number of kthreads */
		wake_up_nr(&ch->idle_wq, wakeup);
	}

	if (needed <= 0)
		return;

	if (needed + assigned > ch->kthreads_assigned_limit) {
		needed = ch->kthreads_assigned_limit - assigned;
		if (needed <= 0)
			return;
	}

	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
		needed, ch->partid, ch->number);

	xpc_create_kthreads(ch, needed, 0);
}
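
/*
 * Note the strategy above: satisfy the request from already-idle kthreads
 * first via wake_up_nr(), and only create new ones for the remainder,
 * capped by ch->kthreads_assigned_limit.
 */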

/*
 * This function is where XPC's kthreads wait for messages to deliver.
 */
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
	do {
		/* deliver messages to their intended recipients */

		while (ch->w_local_GP.get < ch->w_remote_GP.put &&
		       !(ch->flags & XPC_C_DISCONNECTING)) {
			xpc_deliver_msg(ch);
		}

		if (atomic_inc_return(&ch->kthreads_idle) >
		    ch->kthreads_idle_limit) {
			/* too many idle kthreads on this channel */
			atomic_dec(&ch->kthreads_idle);
			break;
		}

		dev_dbg(xpc_chan, "idle kthread calling "
			"wait_event_interruptible_exclusive()\n");

		(void)wait_event_interruptible_exclusive(ch->idle_wq,
				(ch->w_local_GP.get < ch->w_remote_GP.put ||
				 (ch->flags & XPC_C_DISCONNECTING)));

		atomic_dec(&ch->kthreads_idle);

	} while (!(ch->flags & XPC_C_DISCONNECTING));
}
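
/*
 * A kthread that finds nothing left to deliver parks itself on ch->idle_wq
 * above, unless doing so would exceed ch->kthreads_idle_limit, in which
 * case it returns and exits by way of xpc_kthread_start().
 */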

static int
xpc_kthread_start(void *args)
{
	short partid = XPC_UNPACK_ARG1(args);
	u16 ch_number = XPC_UNPACK_ARG2(args);
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;
	int n_needed;
	unsigned long irq_flags;

	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
		partid, ch_number);

	ch = &part->channels[ch_number];

	if (!(ch->flags & XPC_C_DISCONNECTING)) {

		/* let registerer know that connection has been established */

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
			ch->flags |= XPC_C_CONNECTEDCALLOUT;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_connected_callout(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);
			ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			/*
			 * It is possible that while the callout was being
			 * made that the remote partition sent some messages.
			 * If that is the case, we may need to activate
			 * additional kthreads to help deliver them. We only
			 * need one less than total #of messages to deliver.
			 */
			n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
				xpc_activate_kthreads(ch, n_needed);

		} else {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
		}

		xpc_kthread_waitmsgs(part, ch);
	}

	/* let registerer know that connection is disconnecting */

	spin_lock_irqsave(&ch->lock, irq_flags);
	if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
	    !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		xpc_disconnect_callout(ch, xpDisconnecting);

		spin_lock_irqsave(&ch->lock, irq_flags);
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
		if (atomic_dec_return(&part->nchannels_engaged) == 0) {
			xpc_mark_partition_disengaged(part);
			xpc_IPI_send_disengage(part);
		}
	}

	xpc_msgqueue_deref(ch);

	dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
		partid, ch_number);

	xpc_part_deref(part);
	return 0;
}

/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
 *
 * Additional kthreads are created and destroyed by XPC as the workload
 * demands.
 *
 * A kthread is assigned to one of the active channels that exist for a given
 * partition.
 */
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
		    int ignore_disconnecting)
{
	unsigned long irq_flags;
	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct task_struct *kthread;

	while (needed-- > 0) {

		/*
		 * The following is done on behalf of the newly created
		 * kthread. That kthread is responsible for doing the
		 * counterpart to the following before it exits.
		 */
		if (ignore_disconnecting) {
			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
				/* kthreads assigned had gone to zero */
				BUG_ON(!(ch->flags &
					 XPC_C_DISCONNECTINGCALLOUT_MADE));
				break;
			}

		} else if (ch->flags & XPC_C_DISCONNECTING) {
			break;

		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1) {
			if (atomic_inc_return(&part->nchannels_engaged) == 1)
				xpc_mark_partition_engaged(part);
		}
		(void)xpc_part_ref(part);
		xpc_msgqueue_ref(ch);

		kthread = kthread_run(xpc_kthread_start, (void *)args,
				      "xpc%02dc%d", ch->partid, ch->number);
		if (IS_ERR(kthread)) {
			/* the fork failed */

			/*
			 * NOTE: if (ignore_disconnecting &&
			 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
			 * then we'll deadlock if all other kthreads assigned
			 * to this channel are blocked in the channel's
			 * registerer, because the only thing that will unblock
			 * them is the xpDisconnecting callout that this
			 * failed kthread_run() would have made.
			 */

			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
			    atomic_dec_return(&part->nchannels_engaged) == 0) {
				xpc_mark_partition_disengaged(part);
				xpc_IPI_send_disengage(part);
			}
			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);

			if (atomic_read(&ch->kthreads_assigned) <
			    ch->kthreads_idle_limit) {
				/*
				 * Flag this as an error only if we have an
				 * insufficient #of kthreads for the channel
				 * to function.
				 */
				spin_lock_irqsave(&ch->lock, irq_flags);
				XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			break;
		}
	}
}
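
/*
 * Note the symmetry above: the first kthread assigned to a channel bumps
 * part->nchannels_engaged (engaging the partition if this was its first
 * engaged channel), and the kthread_run() failure path immediately undoes
 * both counts, sending a disengage IPI if this was the last one.
 */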

void
xpc_disconnect_wait(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;
	int wakeup_channel_mgr;

	/* now wait for all callouts to the caller's function to cease */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (!xpc_part_ref(part))
			continue;

		ch = &part->channels[ch_number];

		if (!(ch->flags & XPC_C_WDISCONNECT)) {
			xpc_part_deref(part);
			continue;
		}

		wait_for_completion(&ch->wdisconnect_wait);

		spin_lock_irqsave(&ch->lock, irq_flags);
		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
		wakeup_channel_mgr = 0;

		if (ch->delayed_IPI_flags) {
			if (part->act_state != XPC_P_DEACTIVATING) {
				spin_lock(&part->IPI_lock);
				XPC_SET_IPI_FLAGS(part->local_IPI_amo,
						  ch->number,
						  ch->delayed_IPI_flags);
				spin_unlock(&part->IPI_lock);
				wakeup_channel_mgr = 1;
			}
			ch->delayed_IPI_flags = 0;
		}

		ch->flags &= ~XPC_C_WDISCONNECT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		if (wakeup_channel_mgr)
			xpc_wakeup_channel_mgr(part);

		xpc_part_deref(part);
	}
}

static void
xpc_do_exit(enum xp_retval reason)
{
	short partid;
	int active_part_count, printed_waiting_msg = 0;
	struct xpc_partition *part;
	unsigned long printmsg_time, disengage_request_timeout = 0;

	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
	DBUG_ON(xpc_exiting == 1);

	/*
	 * Let the heartbeat checker thread and the discovery thread
	 * (if one is running) know that they should exit. Also wake up
	 * the heartbeat checker thread in case it's sleeping.
	 */
	xpc_exiting = 1;
	wake_up_interruptible(&xpc_act_IRQ_wq);

	/* ignore all incoming interrupts */
	free_irq(SGI_XPC_ACTIVATE, NULL);

	/* wait for the discovery thread to exit */
	wait_for_completion(&xpc_discovery_exited);

	/* wait for the heartbeat checker thread to exit */
	wait_for_completion(&xpc_hb_checker_exited);

	/* sleep for a 1/3 of a second or so */
	(void)msleep_interruptible(300);

	/* wait for all partitions to become inactive */

	printmsg_time = jiffies + (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
	xpc_disengage_request_timedout = 0;

	do {
		active_part_count = 0;

		for (partid = 0; partid < xp_max_npartitions; partid++) {
			part = &xpc_partitions[partid];

			if (xpc_partition_disengaged(part) &&
			    part->act_state == XPC_P_INACTIVE) {
				continue;
			}

			active_part_count++;

			XPC_DEACTIVATE_PARTITION(part, reason);

			if (part->disengage_request_timeout >
			    disengage_request_timeout) {
				disengage_request_timeout =
				    part->disengage_request_timeout;
			}
		}

		if (xpc_partition_engaged(-1UL)) {
			if (time_after(jiffies, printmsg_time)) {
				dev_info(xpc_part, "waiting for remote "
					 "partitions to disengage, timeout in "
					 "%ld seconds\n",
					 (disengage_request_timeout - jiffies)
					 / HZ);
				printmsg_time = jiffies +
				    (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
				printed_waiting_msg = 1;
			}

		} else if (active_part_count > 0) {
			if (printed_waiting_msg) {
				dev_info(xpc_part, "waiting for local partition"
					 " to disengage\n");
				printed_waiting_msg = 0;
			}

		} else {
			if (!xpc_disengage_request_timedout) {
				dev_info(xpc_part, "all partitions have "
					 "disengaged\n");
			}
			break;
		}

		/* sleep for a 1/3 of a second or so */
		(void)msleep_interruptible(300);

	} while (1);

	DBUG_ON(xpc_partition_engaged(-1UL));

	/* indicate to others that our reserved page is uninitialized */
	xpc_rsvd_page->stamp = ZERO_STAMP;

	/* now it's time to eliminate our heartbeat */
	del_timer_sync(&xpc_hb_timer);
	DBUG_ON(xpc_vars->heartbeating_to_mask != 0);

	if (reason == xpUnloading) {
		(void)unregister_die_notifier(&xpc_die_notifier);
		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
	}

	/* close down protections for IPI operations */
	xpc_restrict_IPI_ops();

	/* clear the interface to XPC's functions */
	xpc_clear_interface();

	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	kfree(xpc_partitions);
	kfree(xpc_remote_copy_buffer_base);
}

/*
 * This function is called when the system is being rebooted.
 */
static int
xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
{
	enum xp_retval reason;

	switch (event) {
	case SYS_RESTART:
		reason = xpSystemReboot;
		break;
	case SYS_HALT:
		reason = xpSystemHalt;
		break;
	case SYS_POWER_OFF:
		reason = xpSystemPoweroff;
		break;
	default:
		reason = xpSystemGoingDown;
	}

	xpc_do_exit(reason);
	return NOTIFY_DONE;
}

/*
 * Notify other partitions to disengage from all references to our memory.
 */
static void
xpc_die_disengage(void)
{
	struct xpc_partition *part;
	short partid;
	unsigned long engaged;
	long time, printmsg_time, disengage_request_timeout;

	/* keep xpc_hb_checker thread from doing anything (just in case) */
	xpc_exiting = 1;

	xpc_vars->heartbeating_to_mask = 0;	/* indicate we're deactivated */

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
		    remote_vars_version)) {

			/* just in case it was left set by an earlier XPC */
			xpc_clear_partition_engaged(1UL << partid);
			continue;
		}

		if (xpc_partition_engaged(1UL << partid) ||
		    part->act_state != XPC_P_INACTIVE) {
			xpc_request_partition_disengage(part);
			xpc_mark_partition_disengaged(part);
			xpc_IPI_send_disengage(part);
		}
	}

	time = rtc_time();
	printmsg_time = time +
	    (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
	disengage_request_timeout = time +
	    (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);

	/* wait for all other partitions to disengage from us */

	while (1) {
		engaged = xpc_partition_engaged(-1UL);
		if (!engaged) {
			dev_info(xpc_part, "all partitions have disengaged\n");
			break;
		}

		time = rtc_time();
		if (time >= disengage_request_timeout) {
			for (partid = 0; partid < xp_max_npartitions;
			     partid++) {
				if (engaged & (1UL << partid)) {
					dev_info(xpc_part, "disengage from "
						 "remote partition %d timed "
						 "out\n", partid);
				}
			}
			break;
		}

		if (time >= printmsg_time) {
			dev_info(xpc_part, "waiting for remote partitions to "
				 "disengage, timeout in %ld seconds\n",
				 (disengage_request_timeout - time) /
				 sn_rtc_cycles_per_second);
			printmsg_time = time +
			    (XPC_DISENGAGE_PRINTMSG_INTERVAL *
			     sn_rtc_cycles_per_second);
		}
	}
}

/*
 * This function is called when the system is being restarted or halted due
 * to some sort of system failure. If this is the case we need to notify the
 * other partitions to disengage from all references to our memory.
 * This function can also be called when our heartbeater could be offlined
 * for a time. In this case we need to notify other partitions to not worry
 * about the lack of a heartbeat.
 */
static int
xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
{
	switch (event) {
	case DIE_MACHINE_RESTART:
	case DIE_MACHINE_HALT:
		xpc_die_disengage();
		break;

	case DIE_KDEBUG_ENTER:
		/* Should lack of heartbeat be ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_ENTER:
	case DIE_INIT_MONARCH_ENTER:
		xpc_vars->heartbeat++;
		xpc_vars->heartbeat_offline = 1;
		break;

	case DIE_KDEBUG_LEAVE:
		/* Is lack of heartbeat being ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_LEAVE:
	case DIE_INIT_MONARCH_LEAVE:
		xpc_vars->heartbeat++;
		xpc_vars->heartbeat_offline = 0;
		break;
	}

	return NOTIFY_DONE;
}
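
/*
 * For the kdebug/MCA/INIT cases above, the heartbeat is incremented once
 * more before heartbeat_offline is changed, presumably so that remote
 * heartbeat checkers observe one fresh beat at each transition rather
 * than a stale value.
 */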

int __init
xpc_init(void)
{
	int ret;
	short partid;
	struct xpc_partition *part;
	struct task_struct *kthread;
	size_t buf_size;

	if (is_shub()) {
		/*
		 * The ia64-sn2 architecture supports at most 64 partitions.
		 * And the inability to unregister remote AMOs restricts us
		 * further to only support exactly 64 partitions on this
		 * architecture, no less.
		 */
		if (xp_max_npartitions != 64)
			return -EINVAL;

		xpc_init_sn2();

	} else if (is_uv()) {
		xpc_init_uv();

	} else {
		return -ENODEV;
	}

	snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
	snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");

	buf_size = max(XPC_RP_VARS_SIZE,
		       XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
	xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
							       GFP_KERNEL,
						  &xpc_remote_copy_buffer_base);
	if (xpc_remote_copy_buffer == NULL) {
		dev_err(xpc_part, "can't get memory for remote copy buffer\n");
		return -ENOMEM;
	}

	xpc_partitions = kzalloc(sizeof(struct xpc_partition) *
				 xp_max_npartitions, GFP_KERNEL);
	if (xpc_partitions == NULL) {
		dev_err(xpc_part, "can't get memory for partition structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	/*
	 * The first few fields of each entry of xpc_partitions[] need to
	 * be initialized now so that calls to xpc_connect() and
	 * xpc_disconnect() can be made prior to the activation of any remote
	 * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
	 * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
	 * PARTITION HAS BEEN ACTIVATED.
	 */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));

		part->act_IRQ_rcvd = 0;
		spin_lock_init(&part->act_lock);
		part->act_state = XPC_P_INACTIVE;
		XPC_SET_REASON(part, 0, 0);

		init_timer(&part->disengage_request_timer);
		part->disengage_request_timer.function =
		    xpc_timeout_partition_disengage_request;
		part->disengage_request_timer.data = (unsigned long)part;

		part->setup_state = XPC_P_UNSET;
		init_waitqueue_head(&part->teardown_wq);
		atomic_set(&part->references, 0);
	}

	xpc_sysctl = register_sysctl_table(xpc_sys_dir);

	/*
	 * Open up protections for IPI operations (and AMO operations on
	 * Shub 1.1 systems).
	 */
	xpc_allow_IPI_ops();

	/*
	 * Interrupts being processed will increment this atomic variable and
	 * awaken the heartbeat thread which will process the interrupts.
	 */
	atomic_set(&xpc_act_IRQ_rcvd, 0);

	/*
	 * This is safe to do before the xpc_hb_checker thread has started
	 * because the handler releases a wait queue. If an interrupt is
	 * received before the thread is waiting, it will not go to sleep,
	 * but rather immediately process the interrupt.
	 */
	ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
			  "xpc hb", NULL);
	if (ret != 0) {
		dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
			"errno=%d\n", -ret);
		ret = -EBUSY;
		goto out_2;
	}

	/*
	 * Fill the partition reserved page with the information needed by
	 * other partitions to discover we are alive and establish initial
	 * communications.
	 */
	xpc_rsvd_page = xpc_setup_rsvd_page();
	if (xpc_rsvd_page == NULL) {
		dev_err(xpc_part, "can't setup our reserved page\n");
		ret = -EBUSY;
		goto out_3;
	}

	/* add ourselves to the reboot_notifier_list */
	ret = register_reboot_notifier(&xpc_reboot_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register reboot notifier\n");

	/* add ourselves to the die_notifier list */
	ret = register_die_notifier(&xpc_die_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register die notifier\n");

	init_timer(&xpc_hb_timer);
	xpc_hb_timer.function = xpc_hb_beater;

	/*
	 * The real work-horse behind xpc. This processes incoming
	 * interrupts and monitors remote heartbeats.
	 */
	kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking hb check thread\n");
		ret = -EBUSY;
		goto out_4;
	}

	/*
	 * Startup a thread that will attempt to discover other partitions to
	 * activate based on info provided by SAL. This new thread is short
	 * lived and will exit once discovery is complete.
	 */
	kthread = kthread_run(xpc_initiate_discovery, NULL,
			      XPC_DISCOVERY_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking discovery thread\n");

		/* mark this new thread as a non-starter */
		complete(&xpc_discovery_exited);

		xpc_do_exit(xpUnloading);
		return -EBUSY;
	}

	/* set the interface to point at XPC's functions */
	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
			  xpc_initiate_allocate, xpc_initiate_send,
			  xpc_initiate_send_notify, xpc_initiate_received,
			  xpc_initiate_partid_to_nasids);

	return 0;

	/* initialization was not successful */
out_4:
	/* indicate to others that our reserved page is uninitialized */
	xpc_rsvd_page->stamp = ZERO_STAMP;

	del_timer_sync(&xpc_hb_timer);
	(void)unregister_die_notifier(&xpc_die_notifier);
	(void)unregister_reboot_notifier(&xpc_reboot_notifier);
out_3:
	free_irq(SGI_XPC_ACTIVATE, NULL);
out_2:
	xpc_restrict_IPI_ops();
	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);
	kfree(xpc_partitions);
out_1:
	kfree(xpc_remote_copy_buffer_base);
	return ret;
}

module_init(xpc_init);

void __exit
xpc_exit(void)
{
	xpc_do_exit(xpUnloading);
}

module_exit(xpc_exit);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
MODULE_LICENSE("GPL");

module_param(xpc_hb_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
		 "heartbeat increments.");

module_param(xpc_hb_check_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
		 "heartbeat checks.");

module_param(xpc_disengage_request_timelimit, int, 0);
MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
		 "for disengage request to complete.");

module_param(xpc_kdebug_ignore, int, 0);
MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
		 "other partitions when dropping into kdebug.");