[IA64-SGI] cleanup XPC disengage related messages
arch/ia64/sn/kernel/xpc_partition.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
 */


/*
 * Cross Partition Communication (XPC) partition support.
 *
 *      This is the part of XPC that detects the presence/absence of
 *      other partitions. It provides a heartbeat and monitors the
 *      heartbeats of other partitions.
 */
#include <linux/kernel.h>
#include <linux/sysctl.h>
#include <linux/cache.h>
#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <asm/uncached.h>
#include <asm/sn/bte.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/addrs.h>
#include "xpc.h"

/* XPC is exiting flag */
int xpc_exiting;


/* SH_IPI_ACCESS shub register value on startup */
static u64 xpc_sh1_IPI_access;
static u64 xpc_sh2_IPI_access0;
static u64 xpc_sh2_IPI_access1;
static u64 xpc_sh2_IPI_access2;
static u64 xpc_sh2_IPI_access3;


/* original protection values for each node */
u64 xpc_prot_vec[MAX_NUMNODES];


/* this partition's reserved page pointers */
struct xpc_rsvd_page *xpc_rsvd_page;
static u64 *xpc_part_nasids;
static u64 *xpc_mach_nasids;
struct xpc_vars *xpc_vars;
struct xpc_vars_part *xpc_vars_part;

static int xp_nasid_mask_bytes; /* actual size in bytes of nasid mask */
static int xp_nasid_mask_words; /* actual size in words of nasid mask */

/*
 * For performance reasons, each entry of xpc_partitions[] is cacheline
 * aligned. And xpc_partitions[] is padded with an additional entry at the
 * end so that the last legitimate entry doesn't share its cacheline with
 * another variable.
 */
struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];

/*
 * Generic buffer used to store a local copy of portions of a remote
 * partition's reserved page (either its header and part_nasids mask,
 * or its vars).
 *
 * xpc_discovery runs only once and is a separate thread that is
 * very likely going to be processing in parallel with receiving
 * interrupts.
 */
char ____cacheline_aligned xpc_remote_copy_buffer[XPC_RP_HEADER_SIZE +
                                                        XP_NASID_MASK_BYTES];

/*
 * Given a nasid, get the physical address of the partition's reserved page
 * for that nasid. This function returns 0 on any error.
 */
static u64
xpc_get_rsvd_page_pa(int nasid)
{
        bte_result_t bte_res;
        s64 status;
        u64 cookie = 0;
        u64 rp_pa = nasid;      /* seed with nasid */
        u64 len = 0;
        u64 buf = buf;          /* self-init quiets a bogus 'uninitialized' warning */
        u64 buf_len = 0;
        void *buf_base = NULL;
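
        /*
         * SAL returns the reserved page's address iteratively: each call
         * advances the opaque 'cookie', and SALRET_MORE_PASSES apparently
         * means SAL wants 'len' bytes of reserved page data BTE-copied into
         * a local buffer before the next pass. The buffer is (re)allocated
         * whenever SAL asks for more than is currently allocated. The loop
         * ends with SALRET_OK (rp_pa valid) or an error (rp_pa forced to 0).
         */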
        while (1) {

                status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa,
                                                                &len);

                dev_dbg(xpc_part, "SAL returned with status=%li, cookie="
                        "0x%016lx, address=0x%016lx, len=0x%016lx\n",
                        status, cookie, rp_pa, len);

                if (status != SALRET_MORE_PASSES) {
                        break;
                }

                if (L1_CACHE_ALIGN(len) > buf_len) {
                        if (buf_base != NULL) {
                                kfree(buf_base);
                        }
                        buf_len = L1_CACHE_ALIGN(len);
                        buf = (u64) xpc_kmalloc_cacheline_aligned(buf_len,
                                                        GFP_KERNEL, &buf_base);
                        if (buf_base == NULL) {
                                dev_err(xpc_part, "unable to kmalloc "
                                        "len=0x%016lx\n", buf_len);
                                status = SALRET_ERROR;
                                break;
                        }
                }

                bte_res = xp_bte_copy(rp_pa, ia64_tpa(buf), buf_len,
                                        (BTE_NOTIFY | BTE_WACQUIRE), NULL);
                if (bte_res != BTE_SUCCESS) {
                        dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res);
                        status = SALRET_ERROR;
                        break;
                }
        }

        if (buf_base != NULL) {
                kfree(buf_base);
        }

        if (status != SALRET_OK) {
                rp_pa = 0;
        }
        dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
        return rp_pa;
}

/*
 * Fill the partition reserved page with the information needed by
 * other partitions to discover we are alive and establish initial
 * communications.
 */
struct xpc_rsvd_page *
xpc_rsvd_page_init(void)
{
        struct xpc_rsvd_page *rp;
        AMO_t *amos_page;
        u64 rp_pa, nasid_array = 0;
        int i, ret;


        /* get the local reserved page's address */

        preempt_disable();
        rp_pa = xpc_get_rsvd_page_pa(cpuid_to_nasid(smp_processor_id()));
        preempt_enable();
        if (rp_pa == 0) {
                dev_err(xpc_part, "SAL failed to locate the reserved page\n");
                return NULL;
        }
        rp = (struct xpc_rsvd_page *) __va(rp_pa);

        if (rp->partid != sn_partition_id) {
                dev_err(xpc_part, "the reserved page's partid of %d should be "
                        "%d\n", rp->partid, sn_partition_id);
                return NULL;
        }

        rp->version = XPC_RP_VERSION;

        /* establish the actual sizes of the nasid masks */
        if (rp->SAL_version == 1) {
                /* SAL_version 1 didn't set the nasids_size field */
                rp->nasids_size = 128;
        }
        xp_nasid_mask_bytes = rp->nasids_size;
        xp_nasid_mask_words = xp_nasid_mask_bytes / 8;
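
        /*
         * The reserved page appears to be laid out (per the XPC_RP_* macros
         * in xpc.h, as used just below) as a header followed by the
         * part_nasids mask, the mach_nasids mask, the XPC variables, and the
         * per-partition variables. Each nasid mask is nasids_size bytes,
         * i.e. xp_nasid_mask_words u64s with one bit per (even) nasid.
         */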

        /* setup the pointers to the various items in the reserved page */
        xpc_part_nasids = XPC_RP_PART_NASIDS(rp);
        xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp);
        xpc_vars = XPC_RP_VARS(rp);
        xpc_vars_part = XPC_RP_VARS_PART(rp);

        /*
         * Before clearing xpc_vars, see if a page of AMOs had been previously
         * allocated. If not we'll need to allocate one and set permissions
         * so that cross-partition AMOs are allowed.
         *
         * The allocated AMO page needs MCA reporting to remain disabled after
         * XPC has unloaded. To make this work, we keep a copy of the pointer
         * to this page (i.e., amos_page) in the struct xpc_vars structure,
         * which is pointed to by the reserved page, and re-use that saved copy
         * on subsequent loads of XPC. This AMO page is never freed, and its
         * memory protections are never restricted.
         */
        if ((amos_page = xpc_vars->amos_page) == NULL) {
                amos_page = (AMO_t *) TO_AMO(uncached_alloc_page(0));
                if (amos_page == NULL) {
                        dev_err(xpc_part, "can't allocate page of AMOs\n");
                        return NULL;
                }

                /*
                 * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems
                 * when xpc_allow_IPI_ops() is called via xpc_hb_init().
                 */
                if (!enable_shub_wars_1_1()) {
                        ret = sn_change_memprotect(ia64_tpa((u64) amos_page),
                                        PAGE_SIZE, SN_MEMPROT_ACCESS_CLASS_1,
                                        &nasid_array);
                        if (ret != 0) {
                                dev_err(xpc_part, "can't change memory "
                                        "protections\n");
                                uncached_free_page(__IA64_UNCACHED_OFFSET |
                                                   TO_PHYS((u64) amos_page));
                                return NULL;
                        }
                }

        } else if (!IS_AMO_ADDRESS((u64) amos_page)) {
                /*
                 * EFI's XPBOOT can also set amos_page in the reserved page,
                 * but it happens to leave it as an uncached physical address
                 * and we need it to be an uncached virtual, so we'll have to
                 * convert it.
                 */
                if (!IS_AMO_PHYS_ADDRESS((u64) amos_page)) {
                        dev_err(xpc_part, "previously used amos_page address "
                                "is bad = 0x%p\n", (void *) amos_page);
                        return NULL;
                }
                amos_page = (AMO_t *) TO_AMO((u64) amos_page);
        }

        /* clear xpc_vars */
        memset(xpc_vars, 0, sizeof(struct xpc_vars));

        xpc_vars->version = XPC_V_VERSION;
        xpc_vars->act_nasid = cpuid_to_nasid(0);
        xpc_vars->act_phys_cpuid = cpu_physical_id(0);
        xpc_vars->vars_part_pa = __pa(xpc_vars_part);
        xpc_vars->amos_page_pa = ia64_tpa((u64) amos_page);
        xpc_vars->amos_page = amos_page;        /* save for next load of XPC */


        /* clear xpc_vars_part */
        memset((u64 *) xpc_vars_part, 0, sizeof(struct xpc_vars_part) *
                                                        XP_MAX_PARTITIONS);

        /* initialize the activate IRQ related AMO variables */
        for (i = 0; i < xp_nasid_mask_words; i++) {
                (void) xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i);
        }

        /* initialize the engaged remote partitions related AMO variables */
        (void) xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO);
        (void) xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO);
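
        /*
         * The two AMOs just initialized each hold (as the rest of this file
         * uses them) one bit per partition: a remote partition sets its bit
         * in XPC_ENGAGED_PARTITIONS_AMO while it is engaged with us, and its
         * bit in XPC_DISENGAGE_REQUEST_AMO is used to ask it to disengage.
         * xpc_partition_disengaged() below polls the engaged bit to decide
         * when a disengage has completed or timed out.
         */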

        /* timestamp of when reserved page was setup by XPC */
        rp->stamp = CURRENT_TIME;

        /*
         * This signifies to the remote partition that our reserved
         * page is initialized.
         */
        rp->vars_pa = __pa(xpc_vars);

        return rp;
}

/*
 * Change protections to allow IPI operations (and AMO operations on
 * Shub 1.1 systems).
 */
void
xpc_allow_IPI_ops(void)
{
        int node;
        int nasid;


        // >>> Change SH_IPI_ACCESS code to use SAL call once it is available.

        if (is_shub2()) {
                xpc_sh2_IPI_access0 =
                        (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
                xpc_sh2_IPI_access1 =
                        (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
                xpc_sh2_IPI_access2 =
                        (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
                xpc_sh2_IPI_access3 =
                        (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));

                for_each_online_node(node) {
                        nasid = cnodeid_to_nasid(node);
                        HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
                                                                -1UL);
                        HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
                                                                -1UL);
                        HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
                                                                -1UL);
                        HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
                                                                -1UL);
                }

        } else {
                xpc_sh1_IPI_access =
                        (u64) HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_IPI_ACCESS));

                for_each_online_node(node) {
                        nasid = cnodeid_to_nasid(node);
                        HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
                                                                -1UL);

                        /*
                         * Since the BIST collides with memory operations on
                         * SHUB 1.1 sn_change_memprotect() cannot be used.
                         */
                        if (enable_shub_wars_1_1()) {
                                /* open up everything */
                                xpc_prot_vec[node] = (u64) HUB_L((u64 *)
                                                GLOBAL_MMR_ADDR(nasid,
                                                SH1_MD_DQLP_MMR_DIR_PRIVEC0));
                                HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
                                                SH1_MD_DQLP_MMR_DIR_PRIVEC0),
                                                                -1UL);
                                HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
                                                SH1_MD_DQRP_MMR_DIR_PRIVEC0),
                                                                -1UL);
                        }
                }
        }
}

/*
 * Restrict protections to disallow IPI operations (and AMO operations on
 * Shub 1.1 systems).
 */
void
xpc_restrict_IPI_ops(void)
{
        int node;
        int nasid;


        // >>> Change SH_IPI_ACCESS code to use SAL call once it is available.

        if (is_shub2()) {

                for_each_online_node(node) {
                        nasid = cnodeid_to_nasid(node);
                        HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
                                                        xpc_sh2_IPI_access0);
                        HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
                                                        xpc_sh2_IPI_access1);
                        HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
                                                        xpc_sh2_IPI_access2);
                        HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
                                                        xpc_sh2_IPI_access3);
                }

        } else {

                for_each_online_node(node) {
                        nasid = cnodeid_to_nasid(node);
                        HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
                                                        xpc_sh1_IPI_access);

                        if (enable_shub_wars_1_1()) {
                                HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
                                                SH1_MD_DQLP_MMR_DIR_PRIVEC0),
                                                        xpc_prot_vec[node]);
                                HUB_S((u64 *) GLOBAL_MMR_ADDR(nasid,
                                                SH1_MD_DQRP_MMR_DIR_PRIVEC0),
                                                        xpc_prot_vec[node]);
                        }
                }
        }
}

/*
 * At periodic intervals, scan through all active partitions and ensure
 * their heartbeat is still active. If not, the partition is deactivated.
 */
void
xpc_check_remote_hb(void)
{
        struct xpc_vars *remote_vars;
        struct xpc_partition *part;
        partid_t partid;
        bte_result_t bres;


        remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;

        for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {

                if (xpc_exiting) {
                        break;
                }

                if (partid == sn_partition_id) {
                        continue;
                }

                part = &xpc_partitions[partid];

                if (part->act_state == XPC_P_INACTIVE ||
                                part->act_state == XPC_P_DEACTIVATING) {
                        continue;
                }

                /* pull the remote_hb cache line */
                bres = xp_bte_copy(part->remote_vars_pa,
                                        ia64_tpa((u64) remote_vars),
                                        XPC_RP_VARS_SIZE,
                                        (BTE_NOTIFY | BTE_WACQUIRE), NULL);
                if (bres != BTE_SUCCESS) {
                        XPC_DEACTIVATE_PARTITION(part,
                                                xpc_map_bte_errors(bres));
                        continue;
                }

                dev_dbg(xpc_part, "partid = %d, heartbeat = %ld, last_heartbeat"
                        " = %ld, heartbeat_offline = %ld, HB_mask = 0x%lx\n",
                        partid, remote_vars->heartbeat, part->last_heartbeat,
                        remote_vars->heartbeat_offline,
                        remote_vars->heartbeating_to_mask);
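
                /*
                 * The partition is presumed dead if its heartbeat counter has
                 * not advanced since our last check and it has not declared
                 * itself offline via heartbeat_offline, or if it is no longer
                 * heartbeating to us (our bit is clear in its
                 * heartbeating_to_mask, per xpc_hb_allowed()).
                 */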
                if (((remote_vars->heartbeat == part->last_heartbeat) &&
                        (remote_vars->heartbeat_offline == 0)) ||
                             !xpc_hb_allowed(sn_partition_id, remote_vars)) {

                        XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat);
                        continue;
                }

                part->last_heartbeat = remote_vars->heartbeat;
        }
}

/*
 * Get a copy of a portion of the remote partition's rsvd page.
 *
 * remote_rp points to a buffer that is cacheline aligned for BTE copies and
 * is large enough to contain a copy of their reserved page header and
 * part_nasids mask.
 */
static enum xpc_retval
xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
                struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
{
        int bres, i;


        /* get the reserved page's physical address */

        *remote_rp_pa = xpc_get_rsvd_page_pa(nasid);
        if (*remote_rp_pa == 0) {
                return xpcNoRsvdPageAddr;
        }


        /* pull over the reserved page header and part_nasids mask */

        bres = xp_bte_copy(*remote_rp_pa, ia64_tpa((u64) remote_rp),
                                XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes,
                                (BTE_NOTIFY | BTE_WACQUIRE), NULL);
        if (bres != BTE_SUCCESS) {
                return xpc_map_bte_errors(bres);
        }


        if (discovered_nasids != NULL) {
                u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp);


                for (i = 0; i < xp_nasid_mask_words; i++) {
                        discovered_nasids[i] |= remote_part_nasids[i];
                }
        }


        /* check that the partid is for another partition */

        if (remote_rp->partid < 1 ||
                        remote_rp->partid > (XP_MAX_PARTITIONS - 1)) {
                return xpcInvalidPartid;
        }

        if (remote_rp->partid == sn_partition_id) {
                return xpcLocalPartid;
        }


        if (XPC_VERSION_MAJOR(remote_rp->version) !=
                                XPC_VERSION_MAJOR(XPC_RP_VERSION)) {
                return xpcBadVersion;
        }

        return xpcSuccess;
}

/*
 * Get a copy of the remote partition's XPC variables from the reserved page.
 *
 * remote_vars points to a buffer that is cacheline aligned for BTE copies and
 * assumed to be of size XPC_RP_VARS_SIZE.
 */
static enum xpc_retval
xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars)
{
        int bres;


        if (remote_vars_pa == 0) {
                return xpcVarsNotSet;
        }


        /* pull over the cross partition variables */

        bres = xp_bte_copy(remote_vars_pa, ia64_tpa((u64) remote_vars),
                                XPC_RP_VARS_SIZE,
                                (BTE_NOTIFY | BTE_WACQUIRE), NULL);
        if (bres != BTE_SUCCESS) {
                return xpc_map_bte_errors(bres);
        }

        if (XPC_VERSION_MAJOR(remote_vars->version) !=
                                XPC_VERSION_MAJOR(XPC_V_VERSION)) {
                return xpcBadVersion;
        }

        return xpcSuccess;
}

/*
 * Update the remote partition's info.
 */
static void
xpc_update_partition_info(struct xpc_partition *part, u8 remote_rp_version,
                struct timespec *remote_rp_stamp, u64 remote_rp_pa,
                u64 remote_vars_pa, struct xpc_vars *remote_vars)
{
        part->remote_rp_version = remote_rp_version;
        dev_dbg(xpc_part, "  remote_rp_version = 0x%x\n",
                part->remote_rp_version);

        part->remote_rp_stamp = *remote_rp_stamp;
        dev_dbg(xpc_part, "  remote_rp_stamp (tv_sec = 0x%lx tv_nsec = 0x%lx)\n",
                part->remote_rp_stamp.tv_sec, part->remote_rp_stamp.tv_nsec);

        part->remote_rp_pa = remote_rp_pa;
        dev_dbg(xpc_part, "  remote_rp_pa = 0x%016lx\n", part->remote_rp_pa);

        part->remote_vars_pa = remote_vars_pa;
        dev_dbg(xpc_part, "  remote_vars_pa = 0x%016lx\n",
                part->remote_vars_pa);

        part->last_heartbeat = remote_vars->heartbeat;
        dev_dbg(xpc_part, "  last_heartbeat = 0x%016lx\n",
                part->last_heartbeat);

        part->remote_vars_part_pa = remote_vars->vars_part_pa;
        dev_dbg(xpc_part, "  remote_vars_part_pa = 0x%016lx\n",
                part->remote_vars_part_pa);

        part->remote_act_nasid = remote_vars->act_nasid;
        dev_dbg(xpc_part, "  remote_act_nasid = 0x%x\n",
                part->remote_act_nasid);

        part->remote_act_phys_cpuid = remote_vars->act_phys_cpuid;
        dev_dbg(xpc_part, "  remote_act_phys_cpuid = 0x%x\n",
                part->remote_act_phys_cpuid);

        part->remote_amos_page_pa = remote_vars->amos_page_pa;
        dev_dbg(xpc_part, "  remote_amos_page_pa = 0x%lx\n",
                part->remote_amos_page_pa);

        part->remote_vars_version = remote_vars->version;
        dev_dbg(xpc_part, "  remote_vars_version = 0x%x\n",
                part->remote_vars_version);
}

/*
 * Prior code has determined the nasid which generated an IPI. Inspect
 * that nasid to determine if its partition needs to be activated or
 * deactivated.
 *
 * A partition is considered "awaiting activation" if our partition
 * flags indicate it is not active and it has a heartbeat. A
 * partition is considered "awaiting deactivation" if our partition
 * flags indicate it is active but it has no heartbeat or it is not
 * sending its heartbeat to us.
 *
 * To determine the heartbeat, the remote nasid must have a properly
 * initialized reserved page.
 */
static void
xpc_identify_act_IRQ_req(int nasid)
{
        struct xpc_rsvd_page *remote_rp;
        struct xpc_vars *remote_vars;
        u64 remote_rp_pa;
        u64 remote_vars_pa;
        int remote_rp_version;
        int reactivate = 0;
        int stamp_diff;
        struct timespec remote_rp_stamp = { 0, 0 };
        partid_t partid;
        struct xpc_partition *part;
        enum xpc_retval ret;


        /* pull over the reserved page structure */

        remote_rp = (struct xpc_rsvd_page *) xpc_remote_copy_buffer;

        ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
        if (ret != xpcSuccess) {
                dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
                        "which sent interrupt, reason=%d\n", nasid, ret);
                return;
        }

        remote_vars_pa = remote_rp->vars_pa;
        remote_rp_version = remote_rp->version;
        if (XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
                remote_rp_stamp = remote_rp->stamp;
        }
        partid = remote_rp->partid;
        part = &xpc_partitions[partid];


        /* pull over the cross partition variables */

        remote_vars = (struct xpc_vars *) xpc_remote_copy_buffer;

        ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
        if (ret != xpcSuccess) {

                dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
                        "which sent interrupt, reason=%d\n", nasid, ret);

                XPC_DEACTIVATE_PARTITION(part, ret);
                return;
        }


        part->act_IRQ_rcvd++;

        dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
                "%ld:0x%lx\n", (int) nasid, (int) partid, part->act_IRQ_rcvd,
                remote_vars->heartbeat, remote_vars->heartbeating_to_mask);
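
        /*
         * What follows is a decision tree. If the partition is fully
         * disengaged and inactive, record its info and activate it. Otherwise
         * we already know about it, so figure out whether the remote side has
         * rebooted: either via the reserved page stamp (when both sides
         * support RP stamps) or by a changed amos_page_pa / heartbeat mask
         * (older versions). A detected reboot leads to deactivation with
         * xpcReactivating so the partition gets brought back up cleanly,
         * but only once any pending disengage request has been resolved.
         */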

        if (xpc_partition_disengaged(part) &&
                                        part->act_state == XPC_P_INACTIVE) {

                xpc_update_partition_info(part, remote_rp_version,
                                        &remote_rp_stamp, remote_rp_pa,
                                        remote_vars_pa, remote_vars);

                if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
                        if (xpc_partition_disengage_requested(1UL << partid)) {
                                /*
                                 * Other side is waiting on us to disengage,
                                 * even though we already have.
                                 */
                                return;
                        }
                } else {
                        /* other side doesn't support disengage requests */
                        xpc_clear_partition_disengage_request(1UL << partid);
                }

                xpc_activate_partition(part);
                return;
        }

        DBUG_ON(part->remote_rp_version == 0);
        DBUG_ON(part->remote_vars_version == 0);

        if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) {
                DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part->
                                                        remote_vars_version));

                if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
                        DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
                                                                version));
                        /* see if the other side rebooted */
                        if (part->remote_amos_page_pa ==
                                        remote_vars->amos_page_pa &&
                                        xpc_hb_allowed(sn_partition_id,
                                                                remote_vars)) {
                                /* doesn't look that way, so ignore the IPI */
                                return;
                        }
                }

                /*
                 * Other side rebooted and previous XPC didn't support the
                 * disengage request, so we don't need to do anything special.
                 */

                xpc_update_partition_info(part, remote_rp_version,
                                                &remote_rp_stamp, remote_rp_pa,
                                                remote_vars_pa, remote_vars);
                part->reactivate_nasid = nasid;
                XPC_DEACTIVATE_PARTITION(part, xpcReactivating);
                return;
        }

        DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version));

        if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
                DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));

                /*
                 * Other side rebooted and previous XPC did support the
                 * disengage request, but the new one doesn't.
                 */

                xpc_clear_partition_engaged(1UL << partid);
                xpc_clear_partition_disengage_request(1UL << partid);

                xpc_update_partition_info(part, remote_rp_version,
                                        &remote_rp_stamp, remote_rp_pa,
                                        remote_vars_pa, remote_vars);
                reactivate = 1;

        } else {
                DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));

                stamp_diff = xpc_compare_stamps(&part->remote_rp_stamp,
                                                        &remote_rp_stamp);
                if (stamp_diff != 0) {
                        DBUG_ON(stamp_diff >= 0);

                        /*
                         * Other side rebooted and the previous XPC did support
                         * the disengage request, as does the new one.
                         */

                        DBUG_ON(xpc_partition_engaged(1UL << partid));
                        DBUG_ON(xpc_partition_disengage_requested(1UL <<
                                                                partid));

                        xpc_update_partition_info(part, remote_rp_version,
                                                &remote_rp_stamp, remote_rp_pa,
                                                remote_vars_pa, remote_vars);
                        reactivate = 1;
                }
        }

        if (part->disengage_request_timeout > 0 &&
                                        !xpc_partition_disengaged(part)) {
                /* still waiting on other side to disengage from us */
                return;
        }

        if (reactivate) {
                part->reactivate_nasid = nasid;
                XPC_DEACTIVATE_PARTITION(part, xpcReactivating);

        } else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) &&
                        xpc_partition_disengage_requested(1UL << partid)) {
                XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown);
        }
}

/*
 * Loop through the activation AMO variables and process any bits
 * which are set. Each bit indicates a nasid sending a partition
 * activation or deactivation request.
 *
 * Return #of IRQs detected.
 */
int
xpc_identify_act_IRQ_sender(void)
{
        int word, bit;
        u64 nasid_mask;
        u64 nasid;                      /* remote nasid */
        int n_IRQs_detected = 0;
        AMO_t *act_amos;


        act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS;


        /* scan through act AMO variable looking for non-zero entries */
        for (word = 0; word < xp_nasid_mask_words; word++) {

                if (xpc_exiting) {
                        break;
                }

                nasid_mask = xpc_IPI_receive(&act_amos[word]);
                if (nasid_mask == 0) {
                        /* no IRQs from nasids in this variable */
                        continue;
                }

                dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word,
                        nasid_mask);


                /*
                 * If this nasid has been added to the machine since
                 * our partition was reset, this will retain the
                 * remote nasid in our reserved pages machine mask.
                 * This is used in the event of module reload.
                 */
                xpc_mach_nasids[word] |= nasid_mask;
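
                /*
                 * Each AMO variable covers 64 nasids, one per bit, so
                 * XPC_NASID_FROM_W_B() (defined in xpc.h) presumably maps a
                 * (word, bit) pair back to a nasid number; since only even
                 * nasids exist on this platform, that works out to roughly
                 * ((word * 64) + bit) * 2.
                 */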
837 /* locate the nasid(s) which sent interrupts */
839 for (bit = 0; bit < (8 * sizeof(u64)); bit++) {
840 if (nasid_mask & (1UL << bit)) {
841 n_IRQs_detected++;
842 nasid = XPC_NASID_FROM_W_B(word, bit);
843 dev_dbg(xpc_part, "interrupt from nasid %ld\n",
844 nasid);
845 xpc_identify_act_IRQ_req(nasid);
849 return n_IRQs_detected;

/*
 * See if the other side has responded to a partition disengage request
 * from us.
 */
int
xpc_partition_disengaged(struct xpc_partition *part)
{
        partid_t partid = XPC_PARTID(part);
        int disengaged;
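
        /*
         * The remote partition clears its bit in the engaged-partitions AMO
         * once it has torn down its side of the connection. If the bit is
         * still set past disengage_request_timeout (armed when the disengage
         * was requested in xpc_deactivate_partition()), give up and treat
         * the remote partition as dead.
         */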

        disengaged = (xpc_partition_engaged(1UL << partid) == 0);
        if (part->disengage_request_timeout) {
                if (!disengaged) {
                        if (jiffies < part->disengage_request_timeout) {
                                /* timelimit hasn't been reached yet */
                                return 0;
                        }

                        /*
                         * Other side hasn't responded to our disengage
                         * request in a timely fashion, so assume it's dead.
                         */

                        dev_info(xpc_part, "disengage from remote partition %d "
                                "timed out\n", partid);
                        xpc_disengage_request_timedout = 1;
                        xpc_clear_partition_engaged(1UL << partid);
                        disengaged = 1;
                }
                part->disengage_request_timeout = 0;

                /* cancel the timer function, provided it's not us */
                if (!in_interrupt()) {
                        del_singleshot_timer_sync(&part->
                                                disengage_request_timer);
                }

                DBUG_ON(part->act_state != XPC_P_DEACTIVATING &&
                                        part->act_state != XPC_P_INACTIVE);
                if (part->act_state != XPC_P_INACTIVE) {
                        xpc_wakeup_channel_mgr(part);
                }

                if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
                        xpc_cancel_partition_disengage_request(part);
                }
        }
        return disengaged;
}

/*
 * Mark specified partition as active.
 */
enum xpc_retval
xpc_mark_partition_active(struct xpc_partition *part)
{
        unsigned long irq_flags;
        enum xpc_retval ret;


        dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part));

        spin_lock_irqsave(&part->act_lock, irq_flags);
        if (part->act_state == XPC_P_ACTIVATING) {
                part->act_state = XPC_P_ACTIVE;
                ret = xpcSuccess;
        } else {
                DBUG_ON(part->reason == xpcSuccess);
                ret = part->reason;
        }
        spin_unlock_irqrestore(&part->act_lock, irq_flags);

        return ret;
}

/*
 * Notify XPC that the partition is down.
 */
void
xpc_deactivate_partition(const int line, struct xpc_partition *part,
                                enum xpc_retval reason)
{
        unsigned long irq_flags;


        spin_lock_irqsave(&part->act_lock, irq_flags);

        if (part->act_state == XPC_P_INACTIVE) {
                XPC_SET_REASON(part, reason, line);
                spin_unlock_irqrestore(&part->act_lock, irq_flags);
                if (reason == xpcReactivating) {
                        /* we interrupt ourselves to reactivate partition */
                        xpc_IPI_send_reactivate(part);
                }
                return;
        }
        if (part->act_state == XPC_P_DEACTIVATING) {
                if ((part->reason == xpcUnloading && reason != xpcUnloading) ||
                                        reason == xpcReactivating) {
                        XPC_SET_REASON(part, reason, line);
                }
                spin_unlock_irqrestore(&part->act_lock, irq_flags);
                return;
        }

        part->act_state = XPC_P_DEACTIVATING;
        XPC_SET_REASON(part, reason, line);

        spin_unlock_irqrestore(&part->act_lock, irq_flags);
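
        /*
         * If the remote side understands disengage requests, ask it (via its
         * disengage-request AMO bit plus a disengage IPI) to release any
         * references it holds to us, and arm a timer bounded by
         * xpc_disengage_request_timelimit; xpc_partition_disengaged() treats
         * expiration of that timer as the remote partition being dead.
         */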

        if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
                xpc_request_partition_disengage(part);
                xpc_IPI_send_disengage(part);

                /* set a timelimit on the disengage request */
                part->disengage_request_timeout = jiffies +
                                        (xpc_disengage_request_timelimit * HZ);
                part->disengage_request_timer.expires =
                                        part->disengage_request_timeout;
                add_timer(&part->disengage_request_timer);
        }

        dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n",
                XPC_PARTID(part), reason);

        xpc_partition_going_down(part, reason);
}

/*
 * Mark specified partition as inactive.
 */
void
xpc_mark_partition_inactive(struct xpc_partition *part)
{
        unsigned long irq_flags;


        dev_dbg(xpc_part, "setting partition %d to INACTIVE\n",
                XPC_PARTID(part));

        spin_lock_irqsave(&part->act_lock, irq_flags);
        part->act_state = XPC_P_INACTIVE;
        spin_unlock_irqrestore(&part->act_lock, irq_flags);
        part->remote_rp_pa = 0;
}

/*
 * SAL has provided a partition and machine mask. The partition mask
 * contains a bit for each even nasid in our partition. The machine
 * mask contains a bit for each even nasid in the entire machine.
 *
 * Using those two bit arrays, we can determine which nasids are
 * known in the machine. Each should also have a reserved page
 * initialized if they are available for partitioning.
 */
void
xpc_discovery(void)
{
        void *remote_rp_base;
        struct xpc_rsvd_page *remote_rp;
        struct xpc_vars *remote_vars;
        u64 remote_rp_pa;
        u64 remote_vars_pa;
        int region;
        int region_size;
        int max_regions;
        int nasid;
        struct xpc_rsvd_page *rp;
        partid_t partid;
        struct xpc_partition *part;
        u64 *discovered_nasids;
        enum xpc_retval ret;


        remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
                                                xp_nasid_mask_bytes,
                                                GFP_KERNEL, &remote_rp_base);
        if (remote_rp == NULL) {
                return;
        }
        remote_vars = (struct xpc_vars *) remote_rp;


        discovered_nasids = kmalloc(sizeof(u64) * xp_nasid_mask_words,
                                                        GFP_KERNEL);
        if (discovered_nasids == NULL) {
                kfree(remote_rp_base);
                return;
        }
        memset(discovered_nasids, 0, sizeof(u64) * xp_nasid_mask_words);

        rp = (struct xpc_rsvd_page *) xpc_rsvd_page;

        /*
         * The term 'region' in this context refers to the minimum number of
         * nodes that can comprise an access protection grouping. The access
         * protection is in regards to memory, IOI and IPI.
         */
        max_regions = 64;
        region_size = sn_region_size;
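
        /*
         * The switch below relies on fall-through: starting from 64 regions,
         * each matching case doubles max_regions again, and region sizes of
         * 32 or more are normalized down to 16 nasids per region, so larger
         * machines are scanned in more, smaller chunks.
         */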

        switch (region_size) {
        case 128:
                max_regions *= 2;
                /* fall through */
        case 64:
                max_regions *= 2;
                /* fall through */
        case 32:
                max_regions *= 2;
                region_size = 16;
                DBUG_ON(!is_shub2());
        }

        for (region = 0; region < max_regions; region++) {

                if ((volatile int) xpc_exiting) {
                        break;
                }

                dev_dbg(xpc_part, "searching region %d\n", region);

                for (nasid = (region * region_size * 2);
                     nasid < ((region + 1) * region_size * 2);
                     nasid += 2) {

                        if ((volatile int) xpc_exiting) {
                                break;
                        }

                        dev_dbg(xpc_part, "checking nasid %d\n", nasid);


                        if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) {
                                dev_dbg(xpc_part, "PROM indicates Nasid %d is "
                                        "part of the local partition; skipping "
                                        "region\n", nasid);
                                break;
                        }

                        if (!(XPC_NASID_IN_ARRAY(nasid, xpc_mach_nasids))) {
                                dev_dbg(xpc_part, "PROM indicates Nasid %d was "
                                        "not on Numa-Link network at reset\n",
                                        nasid);
                                continue;
                        }

                        if (XPC_NASID_IN_ARRAY(nasid, discovered_nasids)) {
                                dev_dbg(xpc_part, "Nasid %d is part of a "
                                        "partition which was previously "
                                        "discovered\n", nasid);
                                continue;
                        }


                        /* pull over the reserved page structure */

                        ret = xpc_get_remote_rp(nasid, discovered_nasids,
                                              remote_rp, &remote_rp_pa);
                        if (ret != xpcSuccess) {
                                dev_dbg(xpc_part, "unable to get reserved page "
                                        "from nasid %d, reason=%d\n", nasid,
                                        ret);

                                if (ret == xpcLocalPartid) {
                                        break;
                                }
                                continue;
                        }

                        remote_vars_pa = remote_rp->vars_pa;

                        partid = remote_rp->partid;
                        part = &xpc_partitions[partid];


                        /* pull over the cross partition variables */

                        ret = xpc_get_remote_vars(remote_vars_pa, remote_vars);
                        if (ret != xpcSuccess) {
                                dev_dbg(xpc_part, "unable to get XPC variables "
                                        "from nasid %d, reason=%d\n", nasid,
                                        ret);

                                XPC_DEACTIVATE_PARTITION(part, ret);
                                continue;
                        }

                        if (part->act_state != XPC_P_INACTIVE) {
                                dev_dbg(xpc_part, "partition %d on nasid %d is "
                                        "already activating\n", partid, nasid);
                                break;
                        }

                        /*
                         * Register the remote partition's AMOs with SAL so it
                         * can handle and cleanup errors within that address
                         * range should the remote partition go down. We don't
                         * unregister this range because it is difficult to
                         * tell when outstanding writes to the remote partition
                         * are finished and thus when it is safe to unregister.
                         * This should not result in wasted space in the SAL
                         * xp_addr_region table because we should get the same
                         * page for remote_act_amos_pa after module reloads and
                         * system reboots.
                         */
                        if (sn_register_xp_addr_region(
                                            remote_vars->amos_page_pa,
                                            PAGE_SIZE, 1) < 0) {
                                dev_dbg(xpc_part, "partition %d failed to "
                                        "register xp_addr region 0x%016lx\n",
                                        partid, remote_vars->amos_page_pa);

                                XPC_SET_REASON(part, xpcPhysAddrRegFailed,
                                               __LINE__);
                                break;
                        }

                        /*
                         * The remote nasid is valid and available.
                         * Send an interrupt to that nasid to notify
                         * it that we are ready to begin activation.
                         */
                        dev_dbg(xpc_part, "sending an interrupt to AMO 0x%lx, "
                                "nasid %d, phys_cpuid 0x%x\n",
                                remote_vars->amos_page_pa,
                                remote_vars->act_nasid,
                                remote_vars->act_phys_cpuid);

                        if (XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
                                                                version)) {
                                part->remote_amos_page_pa =
                                                remote_vars->amos_page_pa;
                                xpc_mark_partition_disengaged(part);
                                xpc_cancel_partition_disengage_request(part);
                        }
                        xpc_IPI_send_activate(remote_vars);
                }
        }

        kfree(discovered_nasids);
        kfree(remote_rp_base);
}

/*
 * Given a partid, get the nasids owned by that partition from the
 * remote partition's reserved page.
 */
enum xpc_retval
xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask)
{
        struct xpc_partition *part;
        u64 part_nasid_pa;
        int bte_res;


        part = &xpc_partitions[partid];
        if (part->remote_rp_pa == 0) {
                return xpcPartitionDown;
        }

        memset(nasid_mask, 0, XP_NASID_MASK_BYTES);

        part_nasid_pa = (u64) XPC_RP_PART_NASIDS(part->remote_rp_pa);

        bte_res = xp_bte_copy(part_nasid_pa, ia64_tpa((u64) nasid_mask),
                        xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), NULL);

        return xpc_map_bte_errors(bte_res);
}