sgi-xp: cleanup naming of partition defines
drivers/misc/sgi-xp/xpc_sn2.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) sn2-based functions.
 *
 *	Architecture specific implementation of common functions.
 */

#include <linux/delay.h>
#include <asm/uncached.h>
#include <asm/sn/mspec.h>
#include <asm/sn/sn_sal.h>
#include "xpc.h"

/*
 * Define the number of u64s required to represent all the C-brick nasids
 * as a bitmap.  The cross-partition kernel modules deal only with
 * C-brick nasids, thus the need for bitmaps which don't account for
 * odd-numbered (non C-brick) nasids.
 */
#define XPC_MAX_PHYSNODES_SN2	(MAX_NUMALINK_NODES / 2)
#define XP_NASID_MASK_BYTES_SN2	((XPC_MAX_PHYSNODES_SN2 + 7) / 8)
#define XP_NASID_MASK_WORDS_SN2	((XPC_MAX_PHYSNODES_SN2 + 63) / 64)
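
/*
 * Illustrative sizing only (not used by the code): assuming the typical sn2
 * value of MAX_NUMALINK_NODES == 16384, which is consistent with the 128
 * activate amo words mentioned below, the defines above work out to
 *
 *	XPC_MAX_PHYSNODES_SN2	= 16384 / 2        = 8192 C-brick nasids
 *	XP_NASID_MASK_BYTES_SN2	= (8192 + 7) / 8   = 1024 bytes
 *	XP_NASID_MASK_WORDS_SN2	= (8192 + 63) / 64 =  128 u64 words
 */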

/*
 * Memory for XPC's amo variables is allocated by the MSPEC driver. These
 * pages are located in the lowest granule. The lowest granule uses 4k pages
 * for cached references and an alternate TLB handler to never provide a
 * cacheable mapping for the entire region. This will prevent speculative
 * reading of cached copies of our lines from being issued which will cause
 * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64
 * amo variables (based on XP_MAX_NPARTITIONS_SN2) to identify the senders of
 * NOTIFY IRQs, 128 amo variables (based on XP_NASID_MASK_WORDS_SN2) to
 * identify the senders of ACTIVATE IRQs, 1 amo variable to identify which
 * remote partitions (i.e., XPCs) consider themselves currently engaged with
 * the local XPC and 1 amo variable to request partition deactivation.
 */
#define XPC_NOTIFY_IRQ_AMOS_SN2		0
#define XPC_ACTIVATE_IRQ_AMOS_SN2	(XPC_NOTIFY_IRQ_AMOS_SN2 + \
					 XP_MAX_NPARTITIONS_SN2)
#define XPC_ENGAGED_PARTITIONS_AMO_SN2	(XPC_ACTIVATE_IRQ_AMOS_SN2 + \
					 XP_NASID_MASK_WORDS_SN2)
#define XPC_DEACTIVATE_REQUEST_AMO_SN2	(XPC_ENGAGED_PARTITIONS_AMO_SN2 + 1)
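
/*
 * Resulting layout of the amo page, for illustration only (the indices
 * follow from the counts given in the comment above, i.e.
 * XP_MAX_NPARTITIONS_SN2 == 64 and XP_NASID_MASK_WORDS_SN2 == 128):
 *
 *	amos_page[  0.. 63]	NOTIFY IRQ amos, one per potential partition
 *	amos_page[ 64..191]	ACTIVATE IRQ amos, one per nasid-mask word
 *	amos_page[192]		engaged-partitions amo
 *	amos_page[193]		deactivate-request amo
 */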

/*
 * Buffer used to store a local copy of portions of a remote partition's
 * reserved page (either its header and part_nasids mask, or its vars).
 */
static char *xpc_remote_copy_buffer_sn2;
static void *xpc_remote_copy_buffer_base_sn2;

static struct xpc_vars_sn2 *xpc_vars_sn2;
static struct xpc_vars_part_sn2 *xpc_vars_part_sn2;

/* SH_IPI_ACCESS shub register value on startup */
static u64 xpc_sh1_IPI_access_sn2;
static u64 xpc_sh2_IPI_access0_sn2;
static u64 xpc_sh2_IPI_access1_sn2;
static u64 xpc_sh2_IPI_access2_sn2;
static u64 xpc_sh2_IPI_access3_sn2;

/*
 * Change protections to allow IPI operations.
 */
static void
xpc_allow_IPI_ops_sn2(void)
{
	int node;
	int nasid;

	/* !!! The following should get moved into SAL. */
	if (is_shub2()) {
		xpc_sh2_IPI_access0_sn2 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
		xpc_sh2_IPI_access1_sn2 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
		xpc_sh2_IPI_access2_sn2 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
		xpc_sh2_IPI_access3_sn2 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
			      -1UL);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
			      -1UL);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
			      -1UL);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
			      -1UL);
		}
	} else {
		xpc_sh1_IPI_access_sn2 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
			      -1UL);
		}
	}
}

/*
 * Restrict protections to disallow IPI operations.
 */
static void
xpc_disallow_IPI_ops_sn2(void)
{
	int node;
	int nasid;

	/* !!! The following should get moved into SAL. */
	if (is_shub2()) {
		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
			      xpc_sh2_IPI_access0_sn2);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
			      xpc_sh2_IPI_access1_sn2);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
			      xpc_sh2_IPI_access2_sn2);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
			      xpc_sh2_IPI_access3_sn2);
		}
	} else {
		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
			      xpc_sh1_IPI_access_sn2);
		}
	}
}

/*
 * The following set of functions are used for the sending and receiving of
 * IRQs (also known as IPIs). There are two flavors of IRQs, one that is
 * associated with partition activity (SGI_XPC_ACTIVATE) and the other that
 * is associated with channel activity (SGI_XPC_NOTIFY).
 */

static u64
xpc_receive_IRQ_amo_sn2(struct amo *amo)
{
	return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
}

static enum xp_retval
xpc_send_IRQ_sn2(struct amo *amo, u64 flag, int nasid, int phys_cpuid,
		 int vector)
{
	int ret = 0;
	unsigned long irq_flags;

	local_irq_save(irq_flags);

	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag);
	sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);

	return (ret == 0) ? xpSuccess : xpPioReadError;
}

static struct amo *
xpc_init_IRQ_amo_sn2(int index)
{
	struct amo *amo = xpc_vars_sn2->amos_page + index;

	(void)xpc_receive_IRQ_amo_sn2(amo);	/* clear amo variable */
	return amo;
}

/*
 * Functions associated with SGI_XPC_ACTIVATE IRQ.
 */

/*
 * Notify the heartbeat check thread that an activate IRQ has been received.
 */
static irqreturn_t
xpc_handle_activate_IRQ_sn2(int irq, void *dev_id)
{
	atomic_inc(&xpc_activate_IRQ_rcvd);
	wake_up_interruptible(&xpc_activate_IRQ_wq);
	return IRQ_HANDLED;
}

/*
 * Flag the appropriate amo variable and send an IRQ to the specified node.
 */
static void
xpc_send_activate_IRQ_sn2(unsigned long amos_page_pa, int from_nasid,
			  int to_nasid, int to_phys_cpuid)
{
	struct amo *amos = (struct amo *)__va(amos_page_pa +
					      (XPC_ACTIVATE_IRQ_AMOS_SN2 *
					      sizeof(struct amo)));

	(void)xpc_send_IRQ_sn2(&amos[BIT_WORD(from_nasid / 2)],
			       BIT_MASK(from_nasid / 2), to_nasid,
			       to_phys_cpuid, SGI_XPC_ACTIVATE);
}
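
/*
 * Illustrative mapping only: C-brick nasids are even, so nasid n maps to
 * bit (n / 2) of the activate amo array. For example, nasid 6 sets
 * BIT_MASK(3) in amos[BIT_WORD(3)], i.e. bit 3 of activate amo word 0,
 * while nasid 130 sets bit 1 of activate amo word 1.
 */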

static void
xpc_send_local_activate_IRQ_sn2(int from_nasid)
{
	struct amo *amos = (struct amo *)__va(xpc_vars_sn2->amos_page_pa +
					      (XPC_ACTIVATE_IRQ_AMOS_SN2 *
					      sizeof(struct amo)));

	/* fake the sending and receipt of an activate IRQ from remote nasid */
	FETCHOP_STORE_OP(TO_AMO((u64)&amos[BIT_WORD(from_nasid / 2)].variable),
			 FETCHOP_OR, BIT_MASK(from_nasid / 2));

	atomic_inc(&xpc_activate_IRQ_rcvd);
	wake_up_interruptible(&xpc_activate_IRQ_wq);
}

/*
 * Functions associated with SGI_XPC_NOTIFY IRQ.
 */

/*
 * Check to see if any chctl flags were sent from the specified partition.
 */
static void
xpc_check_for_sent_chctl_flags_sn2(struct xpc_partition *part)
{
	union xpc_channel_ctl_flags chctl;
	unsigned long irq_flags;

	chctl.all_flags = xpc_receive_IRQ_amo_sn2(part->sn.sn2.
						  local_chctl_amo_va);
	if (chctl.all_flags == 0)
		return;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.all_flags |= chctl.all_flags;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	dev_dbg(xpc_chan, "received notify IRQ from partid=%d, chctl.all_flags="
		"0x%lx\n", XPC_PARTID(part), chctl.all_flags);

	xpc_wakeup_channel_mgr(part);
}

/*
 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
 * than one partition, we use an amo structure per partition to indicate
 * whether a partition has sent an IRQ or not.  If it has, then wake up the
 * associated kthread to handle it.
 *
 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IRQs sent by XPC
 * running on other partitions.
 *
 * Noteworthy Arguments:
 *
 *	irq - Interrupt ReQuest number. NOT USED.
 *
 *	dev_id - partid of IRQ's potential sender.
 */
static irqreturn_t
xpc_handle_notify_IRQ_sn2(int irq, void *dev_id)
{
	short partid = (short)(u64)dev_id;
	struct xpc_partition *part = &xpc_partitions[partid];

	DBUG_ON(partid < 0 || partid >= XP_MAX_NPARTITIONS_SN2);

	if (xpc_part_ref(part)) {
		xpc_check_for_sent_chctl_flags_sn2(part);

		xpc_part_deref(part);
	}
	return IRQ_HANDLED;
}

/*
 * Check to see if xpc_handle_notify_IRQ_sn2() dropped any IRQs on the floor
 * because the write to their associated amo variable completed after the IRQ
 * was received.
 */
static void
xpc_check_for_dropped_notify_IRQ_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;

	if (xpc_part_ref(part)) {
		xpc_check_for_sent_chctl_flags_sn2(part);

		part_sn2->dropped_notify_IRQ_timer.expires = jiffies +
		    XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
		add_timer(&part_sn2->dropped_notify_IRQ_timer);
		xpc_part_deref(part);
	}
}

/*
 * Send a notify IRQ to the remote partition that is associated with the
 * specified channel.
 */
static void
xpc_send_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
			char *chctl_flag_string, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	union xpc_channel_ctl_flags chctl = { 0 };
	enum xp_retval ret;

	if (likely(part->act_state != XPC_P_AS_DEACTIVATING)) {
		chctl.flags[ch->number] = chctl_flag;
		ret = xpc_send_IRQ_sn2(part_sn2->remote_chctl_amo_va,
				       chctl.all_flags,
				       part_sn2->notify_IRQ_nasid,
				       part_sn2->notify_IRQ_phys_cpuid,
				       SGI_XPC_NOTIFY);
		dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
			chctl_flag_string, ch->partid, ch->number, ret);
		if (unlikely(ret != xpSuccess)) {
			if (irq_flags != NULL)
				spin_unlock_irqrestore(&ch->lock, *irq_flags);
			XPC_DEACTIVATE_PARTITION(part, ret);
			if (irq_flags != NULL)
				spin_lock_irqsave(&ch->lock, *irq_flags);
		}
	}
}

#define XPC_SEND_NOTIFY_IRQ_SN2(_ch, _ipi_f, _irq_f) \
		xpc_send_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f, _irq_f)

/*
 * Make it look like the remote partition, which is associated with the
 * specified channel, sent us a notify IRQ. This faked IRQ will be handled
 * by xpc_check_for_dropped_notify_IRQ_sn2().
 */
static void
xpc_send_local_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
			      char *chctl_flag_string)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	union xpc_channel_ctl_flags chctl = { 0 };

	chctl.flags[ch->number] = chctl_flag;
	FETCHOP_STORE_OP(TO_AMO((u64)&part->sn.sn2.local_chctl_amo_va->
				variable), FETCHOP_OR, chctl.all_flags);
	dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
		chctl_flag_string, ch->partid, ch->number);
}

#define XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(_ch, _ipi_f) \
		xpc_send_local_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f)

static void
xpc_send_chctl_closerequest_sn2(struct xpc_channel *ch,
				unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->local_openclose_args;

	args->reason = ch->reason;
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREQUEST, irq_flags);
}

static void
xpc_send_chctl_closereply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
{
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREPLY, irq_flags);
}

static void
xpc_send_chctl_openrequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->local_openclose_args;

	args->msg_size = ch->msg_size;
	args->local_nentries = ch->local_nentries;
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREQUEST, irq_flags);
}

static void
xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->local_openclose_args;

	args->remote_nentries = ch->remote_nentries;
	args->local_nentries = ch->local_nentries;
	args->local_msgqueue_pa = xp_pa(ch->local_msgqueue);
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREPLY, irq_flags);
}

static void
xpc_send_chctl_msgrequest_sn2(struct xpc_channel *ch)
{
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST, NULL);
}

static void
xpc_send_chctl_local_msgrequest_sn2(struct xpc_channel *ch)
{
	XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST);
}

/*
 * This next set of functions are used to keep track of when a partition is
 * potentially engaged in accessing memory belonging to another partition.
 */

static void
xpc_indicate_partition_engaged_sn2(struct xpc_partition *part)
{
	unsigned long irq_flags;
	struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
					     (XPC_ENGAGED_PARTITIONS_AMO_SN2 *
					     sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* set bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
			 BIT(sn_partition_id));

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);
}

static void
xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	unsigned long irq_flags;
	struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
					     (XPC_ENGAGED_PARTITIONS_AMO_SN2 *
					     sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* clear bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
			 ~BIT(sn_partition_id));

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);

	/*
	 * Send activate IRQ to get other side to see that we've cleared our
	 * bit in their engaged partitions amo.
	 */
	xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
				  cnodeid_to_nasid(0),
				  part_sn2->activate_IRQ_nasid,
				  part_sn2->activate_IRQ_phys_cpuid);
}

static int
xpc_partition_engaged_sn2(short partid)
{
	struct amo *amo = xpc_vars_sn2->amos_page +
			  XPC_ENGAGED_PARTITIONS_AMO_SN2;

	/* our partition's amo variable ANDed with partid mask */
	return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
		BIT(partid)) != 0;
}

static int
xpc_any_partition_engaged_sn2(void)
{
	struct amo *amo = xpc_vars_sn2->amos_page +
			  XPC_ENGAGED_PARTITIONS_AMO_SN2;

	/* our partition's amo variable */
	return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) != 0;
}

static void
xpc_assume_partition_disengaged_sn2(short partid)
{
	struct amo *amo = xpc_vars_sn2->amos_page +
			  XPC_ENGAGED_PARTITIONS_AMO_SN2;

	/* clear bit(s) based on partid mask in our partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
			 ~BIT(partid));
}

/* original protection values for each node */
static u64 xpc_prot_vec_sn2[MAX_NUMNODES];

/*
 * Change protections to allow amo operations on non-Shub 1.1 systems.
 */
static enum xp_retval
xpc_allow_amo_ops_sn2(struct amo *amos_page)
{
	u64 nasid_array = 0;
	int ret;

	/*
	 * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST
	 * collides with memory operations. On those systems we call
	 * xpc_allow_amo_ops_shub_wars_1_1_sn2() instead.
	 */
	if (!enable_shub_wars_1_1()) {
		ret = sn_change_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE,
					   SN_MEMPROT_ACCESS_CLASS_1,
					   &nasid_array);
		if (ret != 0)
			return xpSalError;
	}
	return xpSuccess;
}

/*
 * Change protections to allow amo operations on Shub 1.1 systems.
 */
static void
xpc_allow_amo_ops_shub_wars_1_1_sn2(void)
{
	int node;
	int nasid;

	if (!enable_shub_wars_1_1())
		return;

	for_each_online_node(node) {
		nasid = cnodeid_to_nasid(node);
		/* save current protection values */
		xpc_prot_vec_sn2[node] =
		    (u64)HUB_L((u64 *)GLOBAL_MMR_ADDR(nasid,
						  SH1_MD_DQLP_MMR_DIR_PRIVEC0));
		/* open up everything */
		HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
					     SH1_MD_DQLP_MMR_DIR_PRIVEC0),
		      -1UL);
		HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
					     SH1_MD_DQRP_MMR_DIR_PRIVEC0),
		      -1UL);
	}
}

static enum xp_retval
xpc_get_partition_rsvd_page_pa_sn2(void *buf, u64 *cookie, unsigned long *rp_pa,
				   size_t *len)
{
	s64 status;
	enum xp_retval ret;

	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

	return ret;
}

static enum xp_retval
xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
{
	struct amo *amos_page;
	int i;
	int ret;

	xpc_vars_sn2 = XPC_RP_VARS(rp);

	rp->sn.vars_pa = xp_pa(xpc_vars_sn2);

	/* vars_part array follows immediately after vars */
	xpc_vars_part_sn2 = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) +
							 XPC_RP_VARS_SIZE);

	/*
	 * Before clearing xpc_vars_sn2, see if a page of amos had been
	 * previously allocated. If not we'll need to allocate one and set
	 * permissions so that cross-partition amos are allowed.
	 *
	 * The allocated amo page needs MCA reporting to remain disabled after
	 * XPC has unloaded. To make this work, we keep a copy of the pointer
	 * to this page (i.e., amos_page) in the struct xpc_vars_sn2 structure,
	 * which is pointed to by the reserved page, and re-use that saved copy
	 * on subsequent loads of XPC. This amo page is never freed, and its
	 * memory protections are never restricted.
	 */
	amos_page = xpc_vars_sn2->amos_page;
	if (amos_page == NULL) {
		amos_page = (struct amo *)TO_AMO(uncached_alloc_page(0, 1));
		if (amos_page == NULL) {
			dev_err(xpc_part, "can't allocate page of amos\n");
			return xpNoMemory;
		}

		/*
		 * Open up amo-R/W to cpu. This is done on Shub 1.1 systems
		 * when xpc_allow_amo_ops_shub_wars_1_1_sn2() is called.
		 */
		ret = xpc_allow_amo_ops_sn2(amos_page);
		if (ret != xpSuccess) {
			dev_err(xpc_part, "can't allow amo operations\n");
			uncached_free_page(__IA64_UNCACHED_OFFSET |
					   TO_PHYS((u64)amos_page), 1);
			return ret;
		}
	}

	/* clear xpc_vars_sn2 */
	memset(xpc_vars_sn2, 0, sizeof(struct xpc_vars_sn2));

	xpc_vars_sn2->version = XPC_V_VERSION;
	xpc_vars_sn2->activate_IRQ_nasid = cpuid_to_nasid(0);
	xpc_vars_sn2->activate_IRQ_phys_cpuid = cpu_physical_id(0);
	xpc_vars_sn2->vars_part_pa = xp_pa(xpc_vars_part_sn2);
	xpc_vars_sn2->amos_page_pa = ia64_tpa((u64)amos_page);
	xpc_vars_sn2->amos_page = amos_page;	/* save for next load of XPC */

	/* clear xpc_vars_part_sn2 */
	memset((u64 *)xpc_vars_part_sn2, 0, sizeof(struct xpc_vars_part_sn2) *
	       XP_MAX_NPARTITIONS_SN2);

	/* initialize the activate IRQ related amo variables */
	for (i = 0; i < xpc_nasid_mask_nlongs; i++)
		(void)xpc_init_IRQ_amo_sn2(XPC_ACTIVATE_IRQ_AMOS_SN2 + i);

	/* initialize the engaged remote partitions related amo variables */
	(void)xpc_init_IRQ_amo_sn2(XPC_ENGAGED_PARTITIONS_AMO_SN2);
	(void)xpc_init_IRQ_amo_sn2(XPC_DEACTIVATE_REQUEST_AMO_SN2);

	return xpSuccess;
}

static void
xpc_increment_heartbeat_sn2(void)
{
	xpc_vars_sn2->heartbeat++;
}

static void
xpc_offline_heartbeat_sn2(void)
{
	xpc_increment_heartbeat_sn2();
	xpc_vars_sn2->heartbeat_offline = 1;
}

static void
xpc_online_heartbeat_sn2(void)
{
	xpc_increment_heartbeat_sn2();
	xpc_vars_sn2->heartbeat_offline = 0;
}

static void
xpc_heartbeat_init_sn2(void)
{
	DBUG_ON(xpc_vars_sn2 == NULL);

	bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2);
	xpc_heartbeating_to_mask = &xpc_vars_sn2->heartbeating_to_mask[0];
	xpc_online_heartbeat_sn2();
}

static void
xpc_heartbeat_exit_sn2(void)
{
	xpc_offline_heartbeat_sn2();
}

static enum xp_retval
xpc_get_remote_heartbeat_sn2(struct xpc_partition *part)
{
	struct xpc_vars_sn2 *remote_vars;
	enum xp_retval ret;

	remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2;

	/* pull the remote vars structure that contains the heartbeat */
	ret = xp_remote_memcpy(xp_pa(remote_vars),
			       part->sn.sn2.remote_vars_pa,
			       XPC_RP_VARS_SIZE);
	if (ret != xpSuccess)
		return ret;

	dev_dbg(xpc_part, "partid=%d, heartbeat=%ld, last_heartbeat=%ld, "
		"heartbeat_offline=%ld, HB_mask[0]=0x%lx\n", XPC_PARTID(part),
		remote_vars->heartbeat, part->last_heartbeat,
		remote_vars->heartbeat_offline,
		remote_vars->heartbeating_to_mask[0]);

	if ((remote_vars->heartbeat == part->last_heartbeat &&
	     remote_vars->heartbeat_offline == 0) ||
	    !xpc_hb_allowed(sn_partition_id,
			    &remote_vars->heartbeating_to_mask)) {
		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = remote_vars->heartbeat;
	}

	return ret;
}
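
/*
 * Clarifying note (follows directly from the checks above): xpNoHeartbeat is
 * returned either when the remote heartbeat counter has not advanced since we
 * last looked and the remote side has not declared itself offline, or when
 * the remote side is no longer heartbeating to us. Otherwise last_heartbeat
 * is advanced and the xpSuccess from the remote copy is returned.
 */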

/*
 * Get a copy of the remote partition's XPC variables from the reserved page.
 *
 * remote_vars points to a buffer that is cacheline aligned for BTE copies and
 * assumed to be of size XPC_RP_VARS_SIZE.
 */
static enum xp_retval
xpc_get_remote_vars_sn2(unsigned long remote_vars_pa,
			struct xpc_vars_sn2 *remote_vars)
{
	enum xp_retval ret;

	if (remote_vars_pa == 0)
		return xpVarsNotSet;

	/* pull over the cross partition variables */
	ret = xp_remote_memcpy(xp_pa(remote_vars), remote_vars_pa,
			       XPC_RP_VARS_SIZE);
	if (ret != xpSuccess)
		return ret;

	if (XPC_VERSION_MAJOR(remote_vars->version) !=
	    XPC_VERSION_MAJOR(XPC_V_VERSION)) {
		return xpBadVersion;
	}

	return xpSuccess;
}

static void
xpc_request_partition_activation_sn2(struct xpc_rsvd_page *remote_rp,
				     unsigned long remote_rp_pa, int nasid)
{
	xpc_send_local_activate_IRQ_sn2(nasid);
}

static void
xpc_request_partition_reactivation_sn2(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_sn2(part->sn.sn2.activate_IRQ_nasid);
}

static void
xpc_request_partition_deactivation_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	unsigned long irq_flags;
	struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
					     (XPC_DEACTIVATE_REQUEST_AMO_SN2 *
					     sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* set bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
			 BIT(sn_partition_id));

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);

	/*
	 * Send activate IRQ to get other side to see that we've set our
	 * bit in their deactivate request amo.
	 */
	xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
				  cnodeid_to_nasid(0),
				  part_sn2->activate_IRQ_nasid,
				  part_sn2->activate_IRQ_phys_cpuid);
}

static void
xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part)
{
	unsigned long irq_flags;
	struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
					     (XPC_DEACTIVATE_REQUEST_AMO_SN2 *
					     sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* clear bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
			 ~BIT(sn_partition_id));

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);
}

static int
xpc_partition_deactivation_requested_sn2(short partid)
{
	struct amo *amo = xpc_vars_sn2->amos_page +
			  XPC_DEACTIVATE_REQUEST_AMO_SN2;

	/* our partition's amo variable ANDed with partid mask */
	return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
		BIT(partid)) != 0;
}

/*
 * Update the remote partition's info.
 */
static void
xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
			      unsigned long *remote_rp_ts_jiffies,
			      unsigned long remote_rp_pa,
			      unsigned long remote_vars_pa,
			      struct xpc_vars_sn2 *remote_vars)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;

	part->remote_rp_version = remote_rp_version;
	dev_dbg(xpc_part, "  remote_rp_version = 0x%016x\n",
		part->remote_rp_version);

	part->remote_rp_ts_jiffies = *remote_rp_ts_jiffies;
	dev_dbg(xpc_part, "  remote_rp_ts_jiffies = 0x%016lx\n",
		part->remote_rp_ts_jiffies);

	part->remote_rp_pa = remote_rp_pa;
	dev_dbg(xpc_part, "  remote_rp_pa = 0x%016lx\n", part->remote_rp_pa);

	part_sn2->remote_vars_pa = remote_vars_pa;
	dev_dbg(xpc_part, "  remote_vars_pa = 0x%016lx\n",
		part_sn2->remote_vars_pa);

	part->last_heartbeat = remote_vars->heartbeat;
	dev_dbg(xpc_part, "  last_heartbeat = 0x%016lx\n",
		part->last_heartbeat);

	part_sn2->remote_vars_part_pa = remote_vars->vars_part_pa;
	dev_dbg(xpc_part, "  remote_vars_part_pa = 0x%016lx\n",
		part_sn2->remote_vars_part_pa);

	part_sn2->activate_IRQ_nasid = remote_vars->activate_IRQ_nasid;
	dev_dbg(xpc_part, "  activate_IRQ_nasid = 0x%x\n",
		part_sn2->activate_IRQ_nasid);

	part_sn2->activate_IRQ_phys_cpuid =
	    remote_vars->activate_IRQ_phys_cpuid;
	dev_dbg(xpc_part, "  activate_IRQ_phys_cpuid = 0x%x\n",
		part_sn2->activate_IRQ_phys_cpuid);

	part_sn2->remote_amos_page_pa = remote_vars->amos_page_pa;
	dev_dbg(xpc_part, "  remote_amos_page_pa = 0x%lx\n",
		part_sn2->remote_amos_page_pa);

	part_sn2->remote_vars_version = remote_vars->version;
	dev_dbg(xpc_part, "  remote_vars_version = 0x%x\n",
		part_sn2->remote_vars_version);
}

/*
 * Prior code has determined the nasid which generated an activate IRQ.
 * Inspect that nasid to determine if its partition needs to be activated
 * or deactivated.
 *
 * A partition is considered "awaiting activation" if our partition
 * flags indicate it is not active and it has a heartbeat.  A
 * partition is considered "awaiting deactivation" if our partition
 * flags indicate it is active but it has no heartbeat or it is not
 * sending its heartbeat to us.
 *
 * To determine the heartbeat, the remote nasid must have a properly
 * initialized reserved page.
 */
static void
xpc_identify_activate_IRQ_req_sn2(int nasid)
{
	struct xpc_rsvd_page *remote_rp;
	struct xpc_vars_sn2 *remote_vars;
	unsigned long remote_rp_pa;
	unsigned long remote_vars_pa;
	int remote_rp_version;
	int reactivate = 0;
	unsigned long remote_rp_ts_jiffies = 0;
	short partid;
	struct xpc_partition *part;
	struct xpc_partition_sn2 *part_sn2;
	enum xp_retval ret;

	/* pull over the reserved page structure */

	remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer_sn2;

	ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
	if (ret != xpSuccess) {
		dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
			 "which sent interrupt, reason=%d\n", nasid, ret);
		return;
	}

	remote_vars_pa = remote_rp->sn.vars_pa;
	remote_rp_version = remote_rp->version;
	remote_rp_ts_jiffies = remote_rp->ts_jiffies;

	partid = remote_rp->SAL_partid;
	part = &xpc_partitions[partid];
	part_sn2 = &part->sn.sn2;

	/* pull over the cross partition variables */

	remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2;

	ret = xpc_get_remote_vars_sn2(remote_vars_pa, remote_vars);
	if (ret != xpSuccess) {
		dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
			 "which sent interrupt, reason=%d\n", nasid, ret);

		XPC_DEACTIVATE_PARTITION(part, ret);
		return;
	}

	part->activate_IRQ_rcvd++;

	dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
		"%ld:0x%lx\n", (int)nasid, (int)partid, part->activate_IRQ_rcvd,
		remote_vars->heartbeat, remote_vars->heartbeating_to_mask[0]);

	if (xpc_partition_disengaged(part) &&
	    part->act_state == XPC_P_AS_INACTIVE) {

		xpc_update_partition_info_sn2(part, remote_rp_version,
					      &remote_rp_ts_jiffies,
					      remote_rp_pa, remote_vars_pa,
					      remote_vars);

		if (xpc_partition_deactivation_requested_sn2(partid)) {
			/*
			 * Other side is waiting on us to deactivate even though
			 * we already have.
			 */
			return;
		}

		xpc_activate_partition(part);
		return;
	}

	DBUG_ON(part->remote_rp_version == 0);
	DBUG_ON(part_sn2->remote_vars_version == 0);

	if (remote_rp_ts_jiffies != part->remote_rp_ts_jiffies) {

		/* the other side rebooted */

		DBUG_ON(xpc_partition_engaged_sn2(partid));
		DBUG_ON(xpc_partition_deactivation_requested_sn2(partid));

		xpc_update_partition_info_sn2(part, remote_rp_version,
					      &remote_rp_ts_jiffies,
					      remote_rp_pa, remote_vars_pa,
					      remote_vars);
		reactivate = 1;
	}

	if (part->disengage_timeout > 0 && !xpc_partition_disengaged(part)) {
		/* still waiting on other side to disengage from us */
		return;
	}

	if (reactivate)
		XPC_DEACTIVATE_PARTITION(part, xpReactivating);
	else if (xpc_partition_deactivation_requested_sn2(partid))
		XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown);
}

/*
 * Loop through the activation amo variables and process any bits
 * which are set.  Each bit indicates a nasid sending a partition
 * activation or deactivation request.
 *
 * Return #of IRQs detected.
 */
static int
xpc_identify_activate_IRQ_sender_sn2(void)
{
	int l;
	int b;
	unsigned long nasid_mask_long;
	u64 nasid;		/* remote nasid */
	int n_IRQs_detected = 0;
	struct amo *act_amos;

	act_amos = xpc_vars_sn2->amos_page + XPC_ACTIVATE_IRQ_AMOS_SN2;

	/* scan through activate amo variables looking for non-zero entries */
	for (l = 0; l < xpc_nasid_mask_nlongs; l++) {

		if (xpc_exiting)
			break;

		nasid_mask_long = xpc_receive_IRQ_amo_sn2(&act_amos[l]);

		b = find_first_bit(&nasid_mask_long, BITS_PER_LONG);
		if (b >= BITS_PER_LONG) {
			/* no IRQs from nasids in this amo variable */
			continue;
		}

		dev_dbg(xpc_part, "amo[%d] gave back 0x%lx\n", l,
			nasid_mask_long);

		/*
		 * If this nasid has been added to the machine since
		 * our partition was reset, this will retain the
		 * remote nasid in our reserved pages machine mask.
		 * This is used in the event of module reload.
		 */
		xpc_mach_nasids[l] |= nasid_mask_long;

		/* locate the nasid(s) which sent interrupts */

		do {
			n_IRQs_detected++;
			nasid = (l * BITS_PER_LONG + b) * 2;
			dev_dbg(xpc_part, "interrupt from nasid %ld\n", nasid);
			xpc_identify_activate_IRQ_req_sn2(nasid);

			b = find_next_bit(&nasid_mask_long, BITS_PER_LONG,
					  b + 1);
		} while (b < BITS_PER_LONG);
	}

	return n_IRQs_detected;
}

static void
xpc_process_activate_IRQ_rcvd_sn2(int n_IRQs_expected)
{
	int n_IRQs_detected;

	n_IRQs_detected = xpc_identify_activate_IRQ_sender_sn2();
	if (n_IRQs_detected < n_IRQs_expected) {
		/* retry once to help avoid missing amo */
		(void)xpc_identify_activate_IRQ_sender_sn2();
	}
}

/*
 * Guarantee that the kzalloc'd memory is cacheline aligned.
 */
static void *
xpc_kzalloc_cacheline_aligned_sn2(size_t size, gfp_t flags, void **base)
{
	/* see if kzalloc will give us cacheline aligned memory by default */
	*base = kzalloc(size, flags);
	if (*base == NULL)
		return NULL;

	if ((u64)*base == L1_CACHE_ALIGN((u64)*base))
		return *base;

	kfree(*base);

	/* nope, we'll have to do it ourselves */
	*base = kzalloc(size + L1_CACHE_BYTES, flags);
	if (*base == NULL)
		return NULL;

	return (void *)L1_CACHE_ALIGN((u64)*base);
}
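
/*
 * Illustrative example only (assuming the ia64 cacheline size of 128 bytes):
 * if the padded kzalloc() above returns *base == 0x...e0047c0, then
 * L1_CACHE_ALIGN() rounds it up to 0x...e004800, and because the allocation
 * was padded by L1_CACHE_BYTES there are still at least 'size' usable bytes
 * starting at the aligned address. Callers must kfree() *base, not the
 * aligned pointer that is returned.
 */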

/*
 * Setup the infrastructure necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
static enum xp_retval
xpc_setup_infrastructure_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	enum xp_retval retval;
	int ret;
	int cpuid;
	int ch_number;
	struct xpc_channel *ch;
	struct timer_list *timer;
	short partid = XPC_PARTID(part);

	/*
	 * Allocate all of the channel structures as a contiguous chunk of
	 * memory.
	 */
	DBUG_ON(part->channels != NULL);
	part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS,
				 GFP_KERNEL);
	if (part->channels == NULL) {
		dev_err(xpc_chan, "can't get memory for channels\n");
		return xpNoMemory;
	}

	/* allocate all the required GET/PUT values */

	part_sn2->local_GPs =
	    xpc_kzalloc_cacheline_aligned_sn2(XPC_GP_SIZE, GFP_KERNEL,
					      &part_sn2->local_GPs_base);
	if (part_sn2->local_GPs == NULL) {
		dev_err(xpc_chan, "can't get memory for local get/put "
			"values\n");
		retval = xpNoMemory;
		goto out_1;
	}

	part_sn2->remote_GPs =
	    xpc_kzalloc_cacheline_aligned_sn2(XPC_GP_SIZE, GFP_KERNEL,
					      &part_sn2->remote_GPs_base);
	if (part_sn2->remote_GPs == NULL) {
		dev_err(xpc_chan, "can't get memory for remote get/put "
			"values\n");
		retval = xpNoMemory;
		goto out_2;
	}

	part_sn2->remote_GPs_pa = 0;

	/* allocate all the required open and close args */

	part->local_openclose_args =
	    xpc_kzalloc_cacheline_aligned_sn2(XPC_OPENCLOSE_ARGS_SIZE,
					      GFP_KERNEL,
					      &part->local_openclose_args_base);
	if (part->local_openclose_args == NULL) {
		dev_err(xpc_chan, "can't get memory for local connect args\n");
		retval = xpNoMemory;
		goto out_3;
	}

	part->remote_openclose_args =
	    xpc_kzalloc_cacheline_aligned_sn2(XPC_OPENCLOSE_ARGS_SIZE,
					      GFP_KERNEL,
					      &part->remote_openclose_args_base);
	if (part->remote_openclose_args == NULL) {
		dev_err(xpc_chan, "can't get memory for remote connect args\n");
		retval = xpNoMemory;
		goto out_4;
	}

	part_sn2->remote_openclose_args_pa = 0;

	part_sn2->local_chctl_amo_va = xpc_init_IRQ_amo_sn2(partid);
	part->chctl.all_flags = 0;
	spin_lock_init(&part->chctl_lock);

	part_sn2->notify_IRQ_nasid = 0;
	part_sn2->notify_IRQ_phys_cpuid = 0;
	part_sn2->remote_chctl_amo_va = NULL;

	atomic_set(&part->channel_mgr_requests, 1);
	init_waitqueue_head(&part->channel_mgr_wq);

	sprintf(part_sn2->notify_IRQ_owner, "xpc%02d", partid);
	ret = request_irq(SGI_XPC_NOTIFY, xpc_handle_notify_IRQ_sn2,
			  IRQF_SHARED, part_sn2->notify_IRQ_owner,
			  (void *)(u64)partid);
	if (ret != 0) {
		dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
			"errno=%d\n", -ret);
		retval = xpLackOfResources;
		goto out_5;
	}

	/* Setup a timer to check for dropped notify IRQs */
	timer = &part_sn2->dropped_notify_IRQ_timer;
	init_timer(timer);
	timer->function =
	    (void (*)(unsigned long))xpc_check_for_dropped_notify_IRQ_sn2;
	timer->data = (unsigned long)part;
	timer->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
	add_timer(timer);

	part->nchannels = XPC_MAX_NCHANNELS;

	atomic_set(&part->nchannels_active, 0);
	atomic_set(&part->nchannels_engaged, 0);

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		ch->partid = partid;
		ch->number = ch_number;
		ch->flags = XPC_C_DISCONNECTED;

		ch->sn.sn2.local_GP = &part_sn2->local_GPs[ch_number];
		ch->local_openclose_args =
		    &part->local_openclose_args[ch_number];

		atomic_set(&ch->kthreads_assigned, 0);
		atomic_set(&ch->kthreads_idle, 0);
		atomic_set(&ch->kthreads_active, 0);

		atomic_set(&ch->references, 0);
		atomic_set(&ch->n_to_notify, 0);

		spin_lock_init(&ch->lock);
		mutex_init(&ch->sn.sn2.msg_to_pull_mutex);
		init_completion(&ch->wdisconnect_wait);

		atomic_set(&ch->n_on_msg_allocate_wq, 0);
		init_waitqueue_head(&ch->msg_allocate_wq);
		init_waitqueue_head(&ch->idle_wq);
	}

	/*
	 * With the setting of the partition setup_state to XPC_P_SS_SETUP,
	 * we're declaring that this partition is ready to go.
	 */
	part->setup_state = XPC_P_SS_SETUP;

	/*
	 * Setup the per partition specific variables required by the
	 * remote partition to establish channel connections with us.
	 *
	 * The setting of the magic # indicates that these per partition
	 * specific variables are ready to be used.
	 */
	xpc_vars_part_sn2[partid].GPs_pa = xp_pa(part_sn2->local_GPs);
	xpc_vars_part_sn2[partid].openclose_args_pa =
	    xp_pa(part->local_openclose_args);
	xpc_vars_part_sn2[partid].chctl_amo_pa =
	    xp_pa(part_sn2->local_chctl_amo_va);
	cpuid = raw_smp_processor_id();	/* any CPU in this partition will do */
	xpc_vars_part_sn2[partid].notify_IRQ_nasid = cpuid_to_nasid(cpuid);
	xpc_vars_part_sn2[partid].notify_IRQ_phys_cpuid =
	    cpu_physical_id(cpuid);
	xpc_vars_part_sn2[partid].nchannels = part->nchannels;
	xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC1;

	return xpSuccess;

	/* setup of infrastructure failed */
out_5:
	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
out_4:
	kfree(part->local_openclose_args_base);
	part->local_openclose_args = NULL;
out_3:
	kfree(part_sn2->remote_GPs_base);
	part_sn2->remote_GPs = NULL;
out_2:
	kfree(part_sn2->local_GPs_base);
	part_sn2->local_GPs = NULL;
out_1:
	kfree(part->channels);
	part->channels = NULL;
	return retval;
}

/*
 * Teardown the infrastructure necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
static void
xpc_teardown_infrastructure_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	short partid = XPC_PARTID(part);

	/*
	 * We start off by making this partition inaccessible to local
	 * processes by marking it as no longer setup. Then we make it
	 * inaccessible to remote processes by clearing the XPC per partition
	 * specific variable's magic # (which indicates that these variables
	 * are no longer valid) and by ignoring all XPC notify IRQs sent to
	 * this partition.
	 */

	DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
	DBUG_ON(atomic_read(&part->nchannels_active) != 0);
	DBUG_ON(part->setup_state != XPC_P_SS_SETUP);
	part->setup_state = XPC_P_SS_WTEARDOWN;

	xpc_vars_part_sn2[partid].magic = 0;

	free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);

	/*
	 * Before proceeding with the teardown we have to wait until all
	 * existing references cease.
	 */
	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));

	/* now we can begin tearing down the infrastructure */

	part->setup_state = XPC_P_SS_TORNDOWN;

	/* in case we've still got outstanding timers registered... */
	del_timer_sync(&part_sn2->dropped_notify_IRQ_timer);

	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
	kfree(part->local_openclose_args_base);
	part->local_openclose_args = NULL;
	kfree(part_sn2->remote_GPs_base);
	part_sn2->remote_GPs = NULL;
	kfree(part_sn2->local_GPs_base);
	part_sn2->local_GPs = NULL;
	kfree(part->channels);
	part->channels = NULL;
	part_sn2->local_chctl_amo_va = NULL;
}

/*
 * Create a wrapper that hides the underlying mechanism for pulling a cacheline
 * (or multiple cachelines) from a remote partition.
 *
 * src_pa must be a cacheline aligned physical address on the remote partition.
 * dst must be a cacheline aligned virtual address on this partition.
 * cnt must be cacheline sized
 */
/* ??? Replace this function by call to xp_remote_memcpy() or bte_copy()? */
static enum xp_retval
xpc_pull_remote_cachelines_sn2(struct xpc_partition *part, void *dst,
			       const unsigned long src_pa, size_t cnt)
{
	enum xp_retval ret;

	DBUG_ON(src_pa != L1_CACHE_ALIGN(src_pa));
	DBUG_ON((unsigned long)dst != L1_CACHE_ALIGN((unsigned long)dst));
	DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));

	if (part->act_state == XPC_P_AS_DEACTIVATING)
		return part->reason;

	ret = xp_remote_memcpy(xp_pa(dst), src_pa, cnt);
	if (ret != xpSuccess) {
		dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed,"
			" ret=%d\n", XPC_PARTID(part), ret);
	}
	return ret;
}

/*
 * Pull the remote per partition specific variables from the specified
 * partition.
 */
static enum xp_retval
xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	u8 buffer[L1_CACHE_BYTES * 2];
	struct xpc_vars_part_sn2 *pulled_entry_cacheline =
	    (struct xpc_vars_part_sn2 *)L1_CACHE_ALIGN((u64)buffer);
	struct xpc_vars_part_sn2 *pulled_entry;
	unsigned long remote_entry_cacheline_pa;
	unsigned long remote_entry_pa;
	short partid = XPC_PARTID(part);
	enum xp_retval ret;

	/* pull the cacheline that contains the variables we're interested in */

	DBUG_ON(part_sn2->remote_vars_part_pa !=
		L1_CACHE_ALIGN(part_sn2->remote_vars_part_pa));
	DBUG_ON(sizeof(struct xpc_vars_part_sn2) != L1_CACHE_BYTES / 2);

	remote_entry_pa = part_sn2->remote_vars_part_pa +
	    sn_partition_id * sizeof(struct xpc_vars_part_sn2);

	remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));

	pulled_entry = (struct xpc_vars_part_sn2 *)((u64)pulled_entry_cacheline
						    + (remote_entry_pa &
						    (L1_CACHE_BYTES - 1)));

	ret = xpc_pull_remote_cachelines_sn2(part, pulled_entry_cacheline,
					     remote_entry_cacheline_pa,
					     L1_CACHE_BYTES);
	if (ret != xpSuccess) {
		dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
			"partition %d, ret=%d\n", partid, ret);
		return ret;
	}

	/* see if they've been set up yet */

	if (pulled_entry->magic != XPC_VP_MAGIC1 &&
	    pulled_entry->magic != XPC_VP_MAGIC2) {

		if (pulled_entry->magic != 0) {
			dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
				"partition %d has bad magic value (=0x%lx)\n",
				partid, sn_partition_id, pulled_entry->magic);
			return xpBadMagic;
		}

		/* they've not been initialized yet */
		return xpRetry;
	}

	if (xpc_vars_part_sn2[partid].magic == XPC_VP_MAGIC1) {

		/* validate the variables */

		if (pulled_entry->GPs_pa == 0 ||
		    pulled_entry->openclose_args_pa == 0 ||
		    pulled_entry->chctl_amo_pa == 0) {

			dev_err(xpc_chan, "partition %d's XPC vars_part for "
				"partition %d are not valid\n", partid,
				sn_partition_id);
			return xpInvalidAddress;
		}

		/* the variables we imported look to be valid */

		part_sn2->remote_GPs_pa = pulled_entry->GPs_pa;
		part_sn2->remote_openclose_args_pa =
		    pulled_entry->openclose_args_pa;
		part_sn2->remote_chctl_amo_va =
		    (struct amo *)__va(pulled_entry->chctl_amo_pa);
		part_sn2->notify_IRQ_nasid = pulled_entry->notify_IRQ_nasid;
		part_sn2->notify_IRQ_phys_cpuid =
		    pulled_entry->notify_IRQ_phys_cpuid;

		if (part->nchannels > pulled_entry->nchannels)
			part->nchannels = pulled_entry->nchannels;

		/* let the other side know that we've pulled their variables */

		xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC2;
	}

	if (pulled_entry->magic == XPC_VP_MAGIC1)
		return xpRetry;

	return xpSuccess;
}

/*
 * Establish first contact with the remote partition. This involves pulling
 * the XPC per partition variables from the remote partition and waiting for
 * the remote partition to pull ours.
 */
static enum xp_retval
xpc_make_first_contact_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	enum xp_retval ret;

	/*
	 * Register the remote partition's amos with SAL so it can handle
	 * and cleanup errors within that address range should the remote
	 * partition go down. We don't unregister this range because it is
	 * difficult to tell when outstanding writes to the remote partition
	 * are finished and thus when it is safe to unregister. This should
	 * not result in wasted space in the SAL xp_addr_region table because
	 * we should get the same page for remote_amos_page_pa after module
	 * reloads and system reboots.
	 */
	if (sn_register_xp_addr_region(part_sn2->remote_amos_page_pa,
				       PAGE_SIZE, 1) < 0) {
		dev_warn(xpc_part, "xpc_activating(%d) failed to register "
			 "xp_addr region\n", XPC_PARTID(part));

		ret = xpPhysAddrRegFailed;
		XPC_DEACTIVATE_PARTITION(part, ret);
		return ret;
	}

	/*
	 * Send activate IRQ to get other side to activate if they've not
	 * already begun to do so.
	 */
	xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
				  cnodeid_to_nasid(0),
				  part_sn2->activate_IRQ_nasid,
				  part_sn2->activate_IRQ_phys_cpuid);

	while ((ret = xpc_pull_remote_vars_part_sn2(part)) != xpSuccess) {
		if (ret != xpRetry) {
			XPC_DEACTIVATE_PARTITION(part, ret);
			return ret;
		}

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}
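
/*
 * Sketch of the resulting handshake (derived from the two functions above,
 * shown for illustration only):
 *
 *	partition A				partition B
 *	-----------				-----------
 *	sets its vars_part[B].magic to		sets its vars_part[A].magic to
 *	  XPC_VP_MAGIC1 at setup		  XPC_VP_MAGIC1 at setup
 *	pulls B's vars_part[A]; on seeing	pulls A's vars_part[B]; on seeing
 *	  MAGIC1 it caches B's addresses	  MAGIC1 it caches A's addresses
 *	  and sets vars_part[B].magic to	  and sets vars_part[A].magic to
 *	  XPC_VP_MAGIC2				  XPC_VP_MAGIC2
 *	keeps retrying until the entry it	keeps retrying until the entry it
 *	  pulls shows MAGIC2, i.e. B has	  pulls shows MAGIC2, i.e. A has
 *	  also pulled A's variables		  also pulled B's variables
 */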

/*
 * Get the chctl flags and pull the openclose args and/or remote GPs as needed.
 */
static u64
xpc_get_chctl_all_flags_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;
	enum xp_retval ret;

	/*
	 * See if there are any chctl flags to be handled.
	 */

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	if (xpc_any_openclose_chctl_flags_set(&chctl)) {
		ret = xpc_pull_remote_cachelines_sn2(part, part->
						     remote_openclose_args,
						     part_sn2->
						     remote_openclose_args_pa,
						     XPC_OPENCLOSE_ARGS_SIZE);
		if (ret != xpSuccess) {
			XPC_DEACTIVATE_PARTITION(part, ret);

			dev_dbg(xpc_chan, "failed to pull openclose args from "
				"partition %d, ret=%d\n", XPC_PARTID(part),
				ret);

			/* don't bother processing chctl flags anymore */
			chctl.all_flags = 0;
		}
	}

	if (xpc_any_msg_chctl_flags_set(&chctl)) {
		ret = xpc_pull_remote_cachelines_sn2(part, part_sn2->remote_GPs,
						     part_sn2->remote_GPs_pa,
						     XPC_GP_SIZE);
		if (ret != xpSuccess) {
			XPC_DEACTIVATE_PARTITION(part, ret);

			dev_dbg(xpc_chan, "failed to pull GPs from partition "
				"%d, ret=%d\n", XPC_PARTID(part), ret);

			/* don't bother processing chctl flags anymore */
			chctl.all_flags = 0;
		}
	}

	return chctl.all_flags;
}

/*
 * Allocate the local message queue and the notify queue.
 */
static enum xp_retval
xpc_allocate_local_msgqueue_sn2(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	int nentries;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {

		nbytes = nentries * ch->msg_size;
		ch->local_msgqueue =
		    xpc_kzalloc_cacheline_aligned_sn2(nbytes, GFP_KERNEL,
						      &ch->local_msgqueue_base);
		if (ch->local_msgqueue == NULL)
			continue;

		nbytes = nentries * sizeof(struct xpc_notify);
		ch->notify_queue = kzalloc(nbytes, GFP_KERNEL);
		if (ch->notify_queue == NULL) {
			kfree(ch->local_msgqueue_base);
			ch->local_msgqueue = NULL;
			continue;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries) {
			dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, "
				"partid=%d, channel=%d\n", nentries,
				ch->local_nentries, ch->partid, ch->number);

			ch->local_nentries = nentries;
		}
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	dev_dbg(xpc_chan, "can't get memory for local message queue and notify "
		"queue, partid=%d, channel=%d\n", ch->partid, ch->number);
	return xpNoMemory;
}

/*
 * Allocate the cached remote message queue.
 */
static enum xp_retval
xpc_allocate_remote_msgqueue_sn2(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	int nentries;
	size_t nbytes;

	DBUG_ON(ch->remote_nentries <= 0);

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {

		nbytes = nentries * ch->msg_size;
		ch->remote_msgqueue =
		    xpc_kzalloc_cacheline_aligned_sn2(nbytes, GFP_KERNEL,
						      &ch->remote_msgqueue_base);
		if (ch->remote_msgqueue == NULL)
			continue;

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, "
				"partid=%d, channel=%d\n", nentries,
				ch->remote_nentries, ch->partid, ch->number);

			ch->remote_nentries = nentries;
		}
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	dev_dbg(xpc_chan, "can't get memory for cached remote message queue, "
		"partid=%d, channel=%d\n", ch->partid, ch->number);
	return xpNoMemory;
}

/*
 * Allocate message queues and other stuff associated with a channel.
 *
 * Note: Assumes all of the channel sizes are filled in.
 */
static enum xp_retval
xpc_allocate_msgqueues_sn2(struct xpc_channel *ch)
{
	enum xp_retval ret;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ret = xpc_allocate_local_msgqueue_sn2(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_remote_msgqueue_sn2(ch);
		if (ret != xpSuccess) {
			kfree(ch->local_msgqueue_base);
			ch->local_msgqueue = NULL;
			kfree(ch->notify_queue);
			ch->notify_queue = NULL;
		}
	}
	return ret;
}

/*
 * Free up message queues and other stuff that were allocated for the specified
 * channel.
 *
 * Note: ch->reason and ch->reason_line are left set for debugging purposes,
 * they're cleared when XPC_C_DISCONNECTED is cleared.
 */
static void
xpc_free_msgqueues_sn2(struct xpc_channel *ch)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;

	DBUG_ON(!spin_is_locked(&ch->lock));
	DBUG_ON(atomic_read(&ch->n_to_notify) != 0);

	ch->remote_msgqueue_pa = 0;
	ch->func = NULL;
	ch->key = NULL;
	ch->msg_size = 0;
	ch->local_nentries = 0;
	ch->remote_nentries = 0;
	ch->kthreads_assigned_limit = 0;
	ch->kthreads_idle_limit = 0;

	ch_sn2->local_GP->get = 0;
	ch_sn2->local_GP->put = 0;
	ch_sn2->remote_GP.get = 0;
	ch_sn2->remote_GP.put = 0;
	ch_sn2->w_local_GP.get = 0;
	ch_sn2->w_local_GP.put = 0;
	ch_sn2->w_remote_GP.get = 0;
	ch_sn2->w_remote_GP.put = 0;
	ch_sn2->next_msg_to_pull = 0;

	if (ch->flags & XPC_C_SETUP) {
		dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n",
			ch->flags, ch->partid, ch->number);

		kfree(ch->local_msgqueue_base);
		ch->local_msgqueue = NULL;
		kfree(ch->remote_msgqueue_base);
		ch->remote_msgqueue = NULL;
		kfree(ch->notify_queue);
		ch->notify_queue = NULL;
	}
}

/*
 * Notify those who wanted to be notified upon delivery of their message.
 */
static void
xpc_notify_senders_sn2(struct xpc_channel *ch, enum xp_retval reason, s64 put)
{
	struct xpc_notify *notify;
	u8 notify_type;
	s64 get = ch->sn.sn2.w_remote_GP.get - 1;

	while (++get < put && atomic_read(&ch->n_to_notify) > 0) {

		notify = &ch->notify_queue[get % ch->local_nentries];

		/*
		 * See if the notify entry indicates it was associated with
		 * a message whose sender wants to be notified. It is possible
		 * that it is, but someone else is doing or has done the
		 * notification.
		 */
		notify_type = notify->type;
		if (notify_type == 0 ||
		    cmpxchg(&notify->type, notify_type, 0) != notify_type) {
			continue;
		}

		DBUG_ON(notify_type != XPC_N_CALL);

		atomic_dec(&ch->n_to_notify);

		if (notify->func != NULL) {
			dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
				"msg_number=%ld, partid=%d, channel=%d\n",
				(void *)notify, get, ch->partid, ch->number);

			notify->func(reason, ch->partid, ch->number,
				     notify->key);

			dev_dbg(xpc_chan, "notify->func() returned, "
				"notify=0x%p, msg_number=%ld, partid=%d, "
				"channel=%d\n", (void *)notify, get,
				ch->partid, ch->number);
		}
	}
}

static void
xpc_notify_senders_of_disconnect_sn2(struct xpc_channel *ch)
{
	xpc_notify_senders_sn2(ch, ch->reason, ch->sn.sn2.w_local_GP.put);
}

/*
 * Clear some of the msg flags in the local message queue.
 */
static inline void
xpc_clear_local_msgqueue_flags_sn2(struct xpc_channel *ch)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg *msg;
	s64 get;

	get = ch_sn2->w_remote_GP.get;
	do {
		msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
					 (get % ch->local_nentries) *
					 ch->msg_size);
		msg->flags = 0;
	} while (++get < ch_sn2->remote_GP.get);
}

/*
 * Clear some of the msg flags in the remote message queue.
 */
static inline void
xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg *msg;
	s64 put;

	put = ch_sn2->w_remote_GP.put;
	do {
		msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
					 (put % ch->remote_nentries) *
					 ch->msg_size);
		msg->flags = 0;
	} while (++put < ch_sn2->remote_GP.put);
}
1847 static void
1848 xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number)
1850 struct xpc_channel *ch = &part->channels[ch_number];
1851 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1852 int nmsgs_sent;
1854 ch_sn2->remote_GP = part->sn.sn2.remote_GPs[ch_number];
1856 /* See what, if anything, has changed for each connected channel */
1858 xpc_msgqueue_ref(ch);
1860 if (ch_sn2->w_remote_GP.get == ch_sn2->remote_GP.get &&
1861 ch_sn2->w_remote_GP.put == ch_sn2->remote_GP.put) {
1862 /* nothing changed since GPs were last pulled */
1863 xpc_msgqueue_deref(ch);
1864 return;
1867 if (!(ch->flags & XPC_C_CONNECTED)) {
1868 xpc_msgqueue_deref(ch);
1869 return;
1873 * First check to see if messages recently sent by us have been
1874 * received by the other side. (The remote GET value will have
1875 * changed since we last looked at it.)
1878 if (ch_sn2->w_remote_GP.get != ch_sn2->remote_GP.get) {
1881 * We need to notify any senders that want to be notified
1882 * that their sent messages have been received by their
1883 * intended recipients. We need to do this before updating
1884 * w_remote_GP.get so that we don't allocate the same message
1885 * queue entries prematurely (see xpc_allocate_msg()).
1887 if (atomic_read(&ch->n_to_notify) > 0) {
1889 * Notify senders that messages sent have been
1890 * received and delivered by the other side.
1892 xpc_notify_senders_sn2(ch, xpMsgDelivered,
1893 ch_sn2->remote_GP.get);
1897 * Clear msg->flags in previously sent messages, so that
1898 * they're ready for xpc_allocate_msg().
1900 xpc_clear_local_msgqueue_flags_sn2(ch);
1902 ch_sn2->w_remote_GP.get = ch_sn2->remote_GP.get;
1904 dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
1905 "channel=%d\n", ch_sn2->w_remote_GP.get, ch->partid,
1906 ch->number);
1909 * If anyone was waiting for message queue entries to become
1910 * available, wake them up.
1912 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
1913 wake_up(&ch->msg_allocate_wq);
1917 * Now check for newly sent messages by the other side. (The remote
1918 * PUT value will have changed since we last looked at it.)
1921 if (ch_sn2->w_remote_GP.put != ch_sn2->remote_GP.put) {
1923 * Clear msg->flags in previously received messages, so that
1924 * they're ready for xpc_get_deliverable_msg().
1926 xpc_clear_remote_msgqueue_flags_sn2(ch);
1928 ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put;
1930 dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
1931 "channel=%d\n", ch_sn2->w_remote_GP.put, ch->partid,
1932 ch->number);
1934 nmsgs_sent = ch_sn2->w_remote_GP.put - ch_sn2->w_local_GP.get;
1935 if (nmsgs_sent > 0) {
1936 dev_dbg(xpc_chan, "msgs waiting to be copied and "
1937 "delivered=%d, partid=%d, channel=%d\n",
1938 nmsgs_sent, ch->partid, ch->number);
1940 if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
1941 xpc_activate_kthreads(ch, nmsgs_sent);
1945 xpc_msgqueue_deref(ch);
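/*
 * A minimal, self-contained sketch of the snapshot comparison above
 * (hypothetical types and names, not part of the XPC API): GET and PUT are
 * monotonically increasing 64-bit counters, so comparing the cached working
 * copy against a freshly pulled copy tells us whether the peer consumed our
 * messages (GET advanced) and/or produced new ones (PUT advanced), and by
 * how many.
 */
#include <stdint.h>

struct gp_snapshot {
	int64_t get;	/* next entry the peer will consume */
	int64_t put;	/* next entry the peer will produce */
};

static void gp_progress(const struct gp_snapshot *cached,
			const struct gp_snapshot *fresh,
			int64_t *nmsgs_consumed, int64_t *nmsgs_produced)
{
	*nmsgs_consumed = fresh->get - cached->get;	/* our sends now received */
	*nmsgs_produced = fresh->put - cached->put;	/* new messages to deliver */
}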
1948 static struct xpc_msg *
1949 xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
1951 struct xpc_partition *part = &xpc_partitions[ch->partid];
1952 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1953 unsigned long remote_msg_pa;
1954 struct xpc_msg *msg;
1955 u32 msg_index;
1956 u32 nmsgs;
1957 u64 msg_offset;
1958 enum xp_retval ret;
1960 if (mutex_lock_interruptible(&ch_sn2->msg_to_pull_mutex) != 0) {
1961 /* we were interrupted by a signal */
1962 return NULL;
1965 while (get >= ch_sn2->next_msg_to_pull) {
1967 /* pull as many messages as are ready and able to be pulled */
1969 msg_index = ch_sn2->next_msg_to_pull % ch->remote_nentries;
1971 DBUG_ON(ch_sn2->next_msg_to_pull >= ch_sn2->w_remote_GP.put);
1972 nmsgs = ch_sn2->w_remote_GP.put - ch_sn2->next_msg_to_pull;
1973 if (msg_index + nmsgs > ch->remote_nentries) {
1974 /* ignore the ones that wrap the msg queue for now */
1975 nmsgs = ch->remote_nentries - msg_index;
1978 msg_offset = msg_index * ch->msg_size;
1979 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
1980 remote_msg_pa = ch->remote_msgqueue_pa + msg_offset;
1982 ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg_pa,
1983 nmsgs * ch->msg_size);
1984 if (ret != xpSuccess) {
1986 dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
1987 " msg %ld from partition %d, channel=%d, "
1988 "ret=%d\n", nmsgs, ch_sn2->next_msg_to_pull,
1989 ch->partid, ch->number, ret);
1991 XPC_DEACTIVATE_PARTITION(part, ret);
1993 mutex_unlock(&ch_sn2->msg_to_pull_mutex);
1994 return NULL;
1997 ch_sn2->next_msg_to_pull += nmsgs;
2000 mutex_unlock(&ch_sn2->msg_to_pull_mutex);
2002 /* return the message we were looking for */
2003 msg_offset = (get % ch->remote_nentries) * ch->msg_size;
2004 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
2006 return msg;
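/*
 * Sketch of the wrap handling in the pull loop above (hypothetical helper,
 * not part of the driver): only the run of slots up to the end of the ring
 * is copied in one go; a wrapped remainder is left for the next iteration.
 */
#include <stdint.h>

static unsigned int contiguous_chunk(uint64_t next_to_pull, uint64_t limit,
				     unsigned int nentries)
{
	unsigned int index = next_to_pull % nentries;
	uint64_t nmsgs = limit - next_to_pull;

	if (index + nmsgs > nentries)
		nmsgs = nentries - index;	/* stop at the ring's end */

	return (unsigned int)nmsgs;
}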
2009 static int
2010 xpc_n_of_deliverable_msgs_sn2(struct xpc_channel *ch)
2012 return ch->sn.sn2.w_remote_GP.put - ch->sn.sn2.w_local_GP.get;
2016 * Get a message to be delivered.
2018 static struct xpc_msg *
2019 xpc_get_deliverable_msg_sn2(struct xpc_channel *ch)
2021 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
2022 struct xpc_msg *msg = NULL;
2023 s64 get;
2025 do {
2026 if (ch->flags & XPC_C_DISCONNECTING)
2027 break;
2029 get = ch_sn2->w_local_GP.get;
2030 rmb(); /* guarantee that .get loads before .put */
2031 if (get == ch_sn2->w_remote_GP.put)
2032 break;
2034 /* There are messages waiting to be pulled and delivered.
2035 * We need to try to secure one for ourselves. We'll do this
2036 * by trying to increment w_local_GP.get and hoping that no one
2037 * else beats us to it. If they do, we'll simply have
2038 * to try again for the next one.
2041 if (cmpxchg(&ch_sn2->w_local_GP.get, get, get + 1) == get) {
2042 /* we got the entry referenced by get */
2044 dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, "
2045 "partid=%d, channel=%d\n", get + 1,
2046 ch->partid, ch->number);
2048 /* pull the message from the remote partition */
2050 msg = xpc_pull_remote_msg_sn2(ch, get);
2052 DBUG_ON(msg != NULL && msg->number != get);
2053 DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE));
2054 DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY));
2056 break;
2059 } while (1);
2061 return msg;
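/*
 * Userspace analogue of the claim loop above (a sketch, assuming C11 atomics
 * stand in for the kernel's cmpxchg()): consumers race to own the next
 * deliverable slot by compare-and-swapping the shared GET counter; a loser
 * retries with the reloaded value.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool claim_next_slot(_Atomic int64_t *get, int64_t put, int64_t *claimed)
{
	int64_t g = atomic_load(get);

	while (g != put) {			/* something is deliverable */
		if (atomic_compare_exchange_weak(get, &g, g + 1)) {
			*claimed = g;		/* we own slot 'g' */
			return true;
		}
		/* CAS failed and reloaded 'g'; try for the next entry */
	}
	return false;				/* nothing waiting */
}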
2065 * Now we actually send the messages that are ready to be sent by advancing
2066 * the local message queue's Put value and then sending a chctl msgrequest to
2067 * the recipient partition.
2069 static void
2070 xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
2072 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
2073 struct xpc_msg *msg;
2074 s64 put = initial_put + 1;
2075 int send_msgrequest = 0;
2077 while (1) {
2079 while (1) {
2080 if (put == ch_sn2->w_local_GP.put)
2081 break;
2083 msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
2084 (put % ch->local_nentries) *
2085 ch->msg_size);
2087 if (!(msg->flags & XPC_M_READY))
2088 break;
2090 put++;
2093 if (put == initial_put) {
2094 /* nothing's changed */
2095 break;
2098 if (cmpxchg_rel(&ch_sn2->local_GP->put, initial_put, put) !=
2099 initial_put) {
2100 /* someone else beat us to it */
2101 DBUG_ON(ch_sn2->local_GP->put < initial_put);
2102 break;
2105 /* we just set the new value of local_GP->put */
2107 dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, "
2108 "channel=%d\n", put, ch->partid, ch->number);
2110 send_msgrequest = 1;
2113 * We need to ensure that the message referenced by
2114 * local_GP->put is not XPC_M_READY or that local_GP->put
2115 * equals w_local_GP.put, so we'll go have a look.
2117 initial_put = put;
2120 if (send_msgrequest)
2121 xpc_send_chctl_msgrequest_sn2(ch);
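/*
 * Sketch of the "advance past consecutive ready slots" pattern above, which
 * xpc_acknowledge_msgs_sn2() below mirrors for the Get value (hypothetical
 * helper using C11 atomics in place of cmpxchg_rel()): scan forward while
 * each slot carries the expected flag, then publish the new value with a
 * single CAS; if another CPU got there first we simply back off, as the
 * driver does.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool advance_published(_Atomic int64_t *published, int64_t from,
			      int64_t limit, bool (*slot_ready)(int64_t))
{
	int64_t to = from;
	int64_t expected = from;

	while (to != limit && slot_ready(to))
		to++;

	if (to == from)
		return false;			/* nothing new to publish */

	/* succeeds only if no one advanced the counter behind our back */
	return atomic_compare_exchange_strong(published, &expected, to);
}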
2125 * Allocate an entry for a message from the message queue associated with the
2126 * specified channel.
2128 static enum xp_retval
2129 xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
2130 struct xpc_msg **address_of_msg)
2132 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
2133 struct xpc_msg *msg;
2134 enum xp_retval ret;
2135 s64 put;
2138 * Get the next available message entry from the local message queue.
2139 * If none are available, we'll make sure that we grab the latest
2140 * GP values.
2142 ret = xpTimeout;
2144 while (1) {
2146 put = ch_sn2->w_local_GP.put;
2147 rmb(); /* guarantee that .put loads before .get */
2148 if (put - ch_sn2->w_remote_GP.get < ch->local_nentries) {
2150 /* There are available message entries. We need to try
2151 * to secure one for ourselves. We'll do this by trying
2152 * to increment w_local_GP.put as long as someone else
2153 * doesn't beat us to it. If they do, we'll have to
2154 * try again.
2156 if (cmpxchg(&ch_sn2->w_local_GP.put, put, put + 1) ==
2157 put) {
2158 /* we got the entry referenced by put */
2159 break;
2161 continue; /* try again */
2165 * There aren't any available msg entries at this time.
2167 * While waiting for a message entry to become available,
2168 * we set a timeout in case the other side is not sending
2169 * completion interrupts. This lets us fake a notify IRQ
2170 * that will cause the notify IRQ handler to fetch the latest
2171 * GP values as if an interrupt had been sent by the other side.
2173 if (ret == xpTimeout)
2174 xpc_send_chctl_local_msgrequest_sn2(ch);
2176 if (flags & XPC_NOWAIT)
2177 return xpNoWait;
2179 ret = xpc_allocate_msg_wait(ch);
2180 if (ret != xpInterrupted && ret != xpTimeout)
2181 return ret;
2184 /* get the message's address and initialize it */
2185 msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
2186 (put % ch->local_nentries) * ch->msg_size);
2188 DBUG_ON(msg->flags != 0);
2189 msg->number = put;
2191 dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
2192 "msg_number=%ld, partid=%d, channel=%d\n", put + 1,
2193 (void *)msg, msg->number, ch->partid, ch->number);
2195 *address_of_msg = msg;
2196 return xpSuccess;
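/*
 * Sketch of the slot addressing used throughout this file (hypothetical
 * helper, not part of the driver): a monotonically increasing GP counter is
 * mapped onto the fixed-size message queue by taking it modulo the number of
 * entries, exactly as xpc_allocate_msg_sn2() does above.
 */
#include <stddef.h>
#include <stdint.h>

static inline void *ring_slot(void *queue_base, uint64_t counter,
			      unsigned int nentries, size_t msg_size)
{
	return (char *)queue_base + (counter % nentries) * msg_size;
}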
2200 * Common code that does the actual sending of the message by advancing the
2201 * local message queue's Put value and sending a chctl msgrequest to the
2202 * partition the message is being sent to.
2204 static enum xp_retval
2205 xpc_send_msg_sn2(struct xpc_channel *ch, u32 flags, void *payload,
2206 u16 payload_size, u8 notify_type, xpc_notify_func func,
2207 void *key)
2209 enum xp_retval ret = xpSuccess;
2210 struct xpc_msg *msg = msg;	/* self-init quiets a spurious 'uninitialized' warning */
2211 struct xpc_notify *notify = notify;	/* likewise */
2212 s64 msg_number;
2213 s64 put;
2215 DBUG_ON(notify_type == XPC_N_CALL && func == NULL);
2217 if (XPC_MSG_SIZE(payload_size) > ch->msg_size)
2218 return xpPayloadTooBig;
2220 xpc_msgqueue_ref(ch);
2222 if (ch->flags & XPC_C_DISCONNECTING) {
2223 ret = ch->reason;
2224 goto out_1;
2226 if (!(ch->flags & XPC_C_CONNECTED)) {
2227 ret = xpNotConnected;
2228 goto out_1;
2231 ret = xpc_allocate_msg_sn2(ch, flags, &msg);
2232 if (ret != xpSuccess)
2233 goto out_1;
2235 msg_number = msg->number;
2237 if (notify_type != 0) {
2239 * Tell the remote side to send an ACK interrupt when the
2240 * message has been delivered.
2242 msg->flags |= XPC_M_INTERRUPT;
2244 atomic_inc(&ch->n_to_notify);
2246 notify = &ch->notify_queue[msg_number % ch->local_nentries];
2247 notify->func = func;
2248 notify->key = key;
2249 notify->type = notify_type;
2251 /* ??? Is a mb() needed here? */
2253 if (ch->flags & XPC_C_DISCONNECTING) {
2255 * An error occurred between our last error check and
2256 * this one. We will try to clear the type field from
2257 * the notify entry. If we succeed then
2258 * xpc_disconnect_channel() didn't already process
2259 * the notify entry.
2261 if (cmpxchg(&notify->type, notify_type, 0) ==
2262 notify_type) {
2263 atomic_dec(&ch->n_to_notify);
2264 ret = ch->reason;
2266 goto out_1;
2270 memcpy(&msg->payload, payload, payload_size);
2272 msg->flags |= XPC_M_READY;
2275 * The preceding store of msg->flags must occur before the following
2276 * load of local_GP->put.
2278 mb();
2280 /* see if the message is next in line to be sent, if so send it */
2282 put = ch->sn.sn2.local_GP->put;
2283 if (put == msg_number)
2284 xpc_send_msgs_sn2(ch, put);
2286 out_1:
2287 xpc_msgqueue_deref(ch);
2288 return ret;
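/*
 * Sketch of the ordering the mb() above enforces (userspace analogue using a
 * C11 fence; the statics below are hypothetical): the store of the READY flag
 * must be visible before 'put' is loaded. Otherwise this sender could read a
 * stale 'put' and conclude it is not next in line, while a concurrent sender
 * advancing 'put' stops at the not-yet-visible flag, and the message would
 * never be sent.
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic int	msg_ready;
static _Atomic int64_t	local_put;

static int next_in_line(int64_t msg_number)
{
	atomic_store_explicit(&msg_ready, 1, memory_order_relaxed);

	atomic_thread_fence(memory_order_seq_cst);	/* plays the role of mb() */

	/* only the message at the current Put value triggers the send path */
	return atomic_load_explicit(&local_put, memory_order_relaxed) == msg_number;
}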
2292 * Now we actually acknowledge the messages that have been delivered and ack'd
2293 * by advancing the cached remote message queue's Get value and, if requested,
2294 * sending a chctl msgrequest to the message sender's partition.
2296 static void
2297 xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
2299 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
2300 struct xpc_msg *msg;
2301 s64 get = initial_get + 1;
2302 int send_msgrequest = 0;
2304 while (1) {
2306 while (1) {
2307 if (get == ch_sn2->w_local_GP.get)
2308 break;
2310 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
2311 (get % ch->remote_nentries) *
2312 ch->msg_size);
2314 if (!(msg->flags & XPC_M_DONE))
2315 break;
2317 msg_flags |= msg->flags;
2318 get++;
2321 if (get == initial_get) {
2322 /* nothing's changed */
2323 break;
2326 if (cmpxchg_rel(&ch_sn2->local_GP->get, initial_get, get) !=
2327 initial_get) {
2328 /* someone else beat us to it */
2329 DBUG_ON(ch_sn2->local_GP->get <= initial_get);
2330 break;
2333 /* we just set the new value of local_GP->get */
2335 dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, "
2336 "channel=%d\n", get, ch->partid, ch->number);
2338 send_msgrequest = (msg_flags & XPC_M_INTERRUPT);
2341 * We need to ensure that the message referenced by
2342 * local_GP->get is not XPC_M_DONE or that local_GP->get
2343 * equals w_local_GP.get, so we'll go have a look.
2345 initial_get = get;
2348 if (send_msgrequest)
2349 xpc_send_chctl_msgrequest_sn2(ch);
2352 static void
2353 xpc_received_msg_sn2(struct xpc_channel *ch, struct xpc_msg *msg)
2355 s64 get;
2356 s64 msg_number = msg->number;
2358 dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
2359 (void *)msg, msg_number, ch->partid, ch->number);
2361 DBUG_ON((((u64)msg - (u64)ch->remote_msgqueue) / ch->msg_size) !=
2362 msg_number % ch->remote_nentries);
2363 DBUG_ON(msg->flags & XPC_M_DONE);
2365 msg->flags |= XPC_M_DONE;
2368 * The preceding store of msg->flags must occur before the following
2369 * load of local_GP->get.
2371 mb();
2374 * See if this message is next in line to be acknowledged as having
2375 * been delivered.
2377 get = ch->sn.sn2.local_GP->get;
2378 if (get == msg_number)
2379 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
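/*
 * Sketch of the invariant asserted by the DBUG_ON() above (hypothetical
 * helper): a message's slot index, recovered from its address within the
 * cached remote queue, must equal its sequence number modulo the ring size.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool slot_matches_number(const void *queue_base, const void *msg,
				size_t msg_size, unsigned int nentries,
				int64_t msg_number)
{
	size_t index = (size_t)((const char *)msg - (const char *)queue_base) / msg_size;

	return index == (size_t)(msg_number % nentries);
}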
2383 xpc_init_sn2(void)
2385 int ret;
2386 size_t buf_size;
2388 xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2;
2389 xpc_rsvd_page_init = xpc_rsvd_page_init_sn2;
2390 xpc_increment_heartbeat = xpc_increment_heartbeat_sn2;
2391 xpc_offline_heartbeat = xpc_offline_heartbeat_sn2;
2392 xpc_online_heartbeat = xpc_online_heartbeat_sn2;
2393 xpc_heartbeat_init = xpc_heartbeat_init_sn2;
2394 xpc_heartbeat_exit = xpc_heartbeat_exit_sn2;
2395 xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_sn2;
2397 xpc_request_partition_activation = xpc_request_partition_activation_sn2;
2398 xpc_request_partition_reactivation =
2399 xpc_request_partition_reactivation_sn2;
2400 xpc_request_partition_deactivation =
2401 xpc_request_partition_deactivation_sn2;
2402 xpc_cancel_partition_deactivation_request =
2403 xpc_cancel_partition_deactivation_request_sn2;
2405 xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2;
2406 xpc_setup_infrastructure = xpc_setup_infrastructure_sn2;
2407 xpc_teardown_infrastructure = xpc_teardown_infrastructure_sn2;
2408 xpc_make_first_contact = xpc_make_first_contact_sn2;
2409 xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_sn2;
2410 xpc_allocate_msgqueues = xpc_allocate_msgqueues_sn2;
2411 xpc_free_msgqueues = xpc_free_msgqueues_sn2;
2412 xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2;
2413 xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2;
2414 xpc_n_of_deliverable_msgs = xpc_n_of_deliverable_msgs_sn2;
2415 xpc_get_deliverable_msg = xpc_get_deliverable_msg_sn2;
2417 xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_sn2;
2418 xpc_partition_engaged = xpc_partition_engaged_sn2;
2419 xpc_any_partition_engaged = xpc_any_partition_engaged_sn2;
2420 xpc_indicate_partition_disengaged =
2421 xpc_indicate_partition_disengaged_sn2;
2422 xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_sn2;
2424 xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_sn2;
2425 xpc_send_chctl_closereply = xpc_send_chctl_closereply_sn2;
2426 xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_sn2;
2427 xpc_send_chctl_openreply = xpc_send_chctl_openreply_sn2;
2429 xpc_send_msg = xpc_send_msg_sn2;
2430 xpc_received_msg = xpc_received_msg_sn2;
2432 buf_size = max(XPC_RP_VARS_SIZE,
2433 XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES_SN2);
2434 xpc_remote_copy_buffer_sn2 = xpc_kmalloc_cacheline_aligned(buf_size,
2435 GFP_KERNEL,
2436 &xpc_remote_copy_buffer_base_sn2);
2437 if (xpc_remote_copy_buffer_sn2 == NULL) {
2438 dev_err(xpc_part, "can't get memory for remote copy buffer\n");
2439 return -ENOMEM;
2442 /* open up protections for IPI and [potentially] amo operations */
2443 xpc_allow_IPI_ops_sn2();
2444 xpc_allow_amo_ops_shub_wars_1_1_sn2();
2447 * This is safe to do before the xpc_hb_checker thread has started
2448 * because the handler releases a wait queue. If an interrupt is
2449 * received before the thread is waiting, it will not go to sleep,
2450 * but rather immediately process the interrupt.
2452 ret = request_irq(SGI_XPC_ACTIVATE, xpc_handle_activate_IRQ_sn2, 0,
2453 "xpc hb", NULL);
2454 if (ret != 0) {
2455 dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
2456 "errno=%d\n", -ret);
2457 xpc_disallow_IPI_ops_sn2();
2458 kfree(xpc_remote_copy_buffer_base_sn2);
2460 return ret;
2463 void
2464 xpc_exit_sn2(void)
2466 free_irq(SGI_XPC_ACTIVATE, NULL);
2467 xpc_disallow_IPI_ops_sn2();
2468 kfree(xpc_remote_copy_buffer_base_sn2);