/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) sn2-based functions.
 *
 *	Architecture specific implementation of common functions.
 */

#include <linux/delay.h>
#include <asm/uncached.h>
#include <asm/sn/mspec.h>
#include <asm/sn/sn_sal.h>
#include "xpc.h"
/*
 * Define the number of u64s required to represent all the C-brick nasids
 * as a bitmap.  The cross-partition kernel modules deal only with
 * C-brick nasids, thus the need for bitmaps which don't account for
 * odd-numbered (non C-brick) nasids.
 */
#define XPC_MAX_PHYSNODES_SN2	(MAX_NUMALINK_NODES / 2)
#define XP_NASID_MASK_BYTES_SN2	((XPC_MAX_PHYSNODES_SN2 + 7) / 8)
#define XP_NASID_MASK_WORDS_SN2	((XPC_MAX_PHYSNODES_SN2 + 63) / 64)
/*
 * Memory for XPC's amo variables is allocated by the MSPEC driver.  These
 * pages are located in the lowest granule.  The lowest granule uses 4k pages
 * for cached references and an alternate TLB handler to never provide a
 * cacheable mapping for the entire region.  This will prevent speculative
 * reading of cached copies of our lines from being issued which will cause
 * a PI FSB Protocol error to be generated by the SHUB.  For XPC, we need 64
 * amo variables (based on XP_MAX_NPARTITIONS_SN2) to identify the senders of
 * NOTIFY IRQs, 128 amo variables (based on XP_NASID_MASK_WORDS_SN2) to
 * identify the senders of ACTIVATE IRQs, 1 amo variable to identify which
 * remote partitions (i.e., XPCs) consider themselves currently engaged with
 * the local XPC and 1 amo variable to request partition deactivation.
 */
#define XPC_NOTIFY_IRQ_AMOS_SN2		0
#define XPC_ACTIVATE_IRQ_AMOS_SN2	(XPC_NOTIFY_IRQ_AMOS_SN2 + \
					 XP_MAX_NPARTITIONS_SN2)
#define XPC_ENGAGED_PARTITIONS_AMO_SN2	(XPC_ACTIVATE_IRQ_AMOS_SN2 + \
					 XP_NASID_MASK_WORDS_SN2)
#define XPC_DEACTIVATE_REQUEST_AMO_SN2	(XPC_ENGAGED_PARTITIONS_AMO_SN2 + 1)
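
/*
 * Illustrative layout implied by the counts described above (64 notify and
 * 128 activate amo variables; a sketch, not taken from the original source):
 *
 *	amos_page[  0 ..  63]	NOTIFY IRQ sender identification (per partid)
 *	amos_page[ 64 .. 191]	ACTIVATE IRQ sender identification (per word
 *				of the nasid mask)
 *	amos_page[192]		engaged partitions
 *	amos_page[193]		deactivate request
 */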
/*
 * Buffer used to store a local copy of portions of a remote partition's
 * reserved page (either its header and part_nasids mask, or its vars).
 */
static void *xpc_remote_copy_buffer_base_sn2;
static char *xpc_remote_copy_buffer_sn2;

static struct xpc_vars_sn2 *xpc_vars_sn2;
static struct xpc_vars_part_sn2 *xpc_vars_part_sn2;
static int
xpc_setup_partitions_sn_sn2(void)
{
	/* nothing needs to be done */
	return 0;
}

static void
xpc_teardown_partitions_sn_sn2(void)
{
	/* nothing needs to be done */
}
/* SH_IPI_ACCESS shub register value on startup */
static u64 xpc_sh1_IPI_access_sn2;
static u64 xpc_sh2_IPI_access0_sn2;
static u64 xpc_sh2_IPI_access1_sn2;
static u64 xpc_sh2_IPI_access2_sn2;
static u64 xpc_sh2_IPI_access3_sn2;
/*
 * Change protections to allow IPI operations.
 */
static void
xpc_allow_IPI_ops_sn2(void)
{
	int node;
	int nasid;

	/* !!! The following should get moved into SAL. */
	if (is_shub2()) {
		xpc_sh2_IPI_access0_sn2 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
		xpc_sh2_IPI_access1_sn2 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
		xpc_sh2_IPI_access2_sn2 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
		xpc_sh2_IPI_access3_sn2 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
			      -1UL);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
			      -1UL);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
			      -1UL);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
			      -1UL);
		}
	} else {
		xpc_sh1_IPI_access_sn2 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
			      -1UL);
		}
	}
}
/*
 * Restrict protections to disallow IPI operations.
 */
static void
xpc_disallow_IPI_ops_sn2(void)
{
	int node;
	int nasid;

	/* !!! The following should get moved into SAL. */
	if (is_shub2()) {
		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
			      xpc_sh2_IPI_access0_sn2);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
			      xpc_sh2_IPI_access1_sn2);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
			      xpc_sh2_IPI_access2_sn2);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
			      xpc_sh2_IPI_access3_sn2);
		}
	} else {
		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
			      xpc_sh1_IPI_access_sn2);
		}
	}
}
/*
 * The following set of functions are used for the sending and receiving of
 * IRQs (also known as IPIs).  There are two flavors of IRQs, one that is
 * associated with partition activity (SGI_XPC_ACTIVATE) and the other that
 * is associated with channel activity (SGI_XPC_NOTIFY).
 */

static u64
xpc_receive_IRQ_amo_sn2(struct amo *amo)
{
	return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
}
static enum xp_retval
xpc_send_IRQ_sn2(struct amo *amo, u64 flag, int nasid, int phys_cpuid,
		 int vector)
{
	int ret = 0;
	unsigned long irq_flags;

	local_irq_save(irq_flags);

	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag);
	sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor.  If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);

	return (ret == 0) ? xpSuccess : xpPioReadError;
}
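
/*
 * Note on the nofault read above: xp_nofault_PIOR() performs a PIO read of
 * the remote SHUB that reports an error instead of faulting when the target
 * partition is down; a nonzero result is surfaced to callers as
 * xpPioReadError.
 */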
static struct amo *
xpc_init_IRQ_amo_sn2(int index)
{
	struct amo *amo = xpc_vars_sn2->amos_page + index;

	(void)xpc_receive_IRQ_amo_sn2(amo);	/* clear amo variable */
	return amo;
}
/*
 * Functions associated with SGI_XPC_ACTIVATE IRQ.
 */

/*
 * Notify the heartbeat check thread that an activate IRQ has been received.
 */
static irqreturn_t
xpc_handle_activate_IRQ_sn2(int irq, void *dev_id)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	xpc_activate_IRQ_rcvd++;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
	return IRQ_HANDLED;
}
/*
 * Flag the appropriate amo variable and send an IRQ to the specified node.
 */
static void
xpc_send_activate_IRQ_sn2(unsigned long amos_page_pa, int from_nasid,
			  int to_nasid, int to_phys_cpuid)
{
	struct amo *amos = (struct amo *)__va(amos_page_pa +
					      (XPC_ACTIVATE_IRQ_AMOS_SN2 *
					      sizeof(struct amo)));

	(void)xpc_send_IRQ_sn2(&amos[BIT_WORD(from_nasid / 2)],
			       BIT_MASK(from_nasid / 2), to_nasid,
			       to_phys_cpuid, SGI_XPC_ACTIVATE);
}
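
/*
 * Encoding example (illustrative): C-brick nasids are even, so from_nasid 10
 * becomes bit 10/2 = 5, i.e. BIT_MASK(5) within amos[BIT_WORD(5)].  The
 * receiving side inverts this mapping when it scans the activate amo
 * variables in xpc_identify_activate_IRQ_sender_sn2().
 */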
static void
xpc_send_local_activate_IRQ_sn2(int from_nasid)
{
	unsigned long irq_flags;
	struct amo *amos = (struct amo *)__va(xpc_vars_sn2->amos_page_pa +
					      (XPC_ACTIVATE_IRQ_AMOS_SN2 *
					      sizeof(struct amo)));

	/* fake the sending and receipt of an activate IRQ from remote nasid */
	FETCHOP_STORE_OP(TO_AMO((u64)&amos[BIT_WORD(from_nasid / 2)].variable),
			 FETCHOP_OR, BIT_MASK(from_nasid / 2));

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	xpc_activate_IRQ_rcvd++;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}
/*
 * Functions associated with SGI_XPC_NOTIFY IRQ.
 */

/*
 * Check to see if any chctl flags were sent from the specified partition.
 */
static void
xpc_check_for_sent_chctl_flags_sn2(struct xpc_partition *part)
{
	union xpc_channel_ctl_flags chctl;
	unsigned long irq_flags;

	chctl.all_flags = xpc_receive_IRQ_amo_sn2(part->sn.sn2.
						  local_chctl_amo_va);
	if (chctl.all_flags == 0)
		return;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.all_flags |= chctl.all_flags;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	dev_dbg(xpc_chan, "received notify IRQ from partid=%d, chctl.all_flags="
		"0x%lx\n", XPC_PARTID(part), chctl.all_flags);

	xpc_wakeup_channel_mgr(part);
}
/*
 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
 * partition actually sent it.  Since SGI_XPC_NOTIFY IRQs may be shared by more
 * than one partition, we use an amo structure per partition to indicate
 * whether a partition has sent an IRQ or not.  If it has, then wake up the
 * associated kthread to handle it.
 *
 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IRQs sent by XPC
 * running on other partitions.
 *
 * Noteworthy Arguments:
 *
 *	irq - Interrupt ReQuest number.  NOT USED.
 *
 *	dev_id - partid of IRQ's potential sender.
 */
static irqreturn_t
xpc_handle_notify_IRQ_sn2(int irq, void *dev_id)
{
	short partid = (short)(u64)dev_id;
	struct xpc_partition *part = &xpc_partitions[partid];

	DBUG_ON(partid < 0 || partid >= XP_MAX_NPARTITIONS_SN2);

	if (xpc_part_ref(part)) {
		xpc_check_for_sent_chctl_flags_sn2(part);

		xpc_part_deref(part);
	}
	return IRQ_HANDLED;
}
/*
 * Check to see if xpc_handle_notify_IRQ_sn2() dropped any IRQs on the floor
 * because the write to their associated amo variable completed after the IRQ
 * was received.
 */
static void
xpc_check_for_dropped_notify_IRQ_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;

	if (xpc_part_ref(part)) {
		xpc_check_for_sent_chctl_flags_sn2(part);

		part_sn2->dropped_notify_IRQ_timer.expires = jiffies +
		    XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
		add_timer(&part_sn2->dropped_notify_IRQ_timer);
		xpc_part_deref(part);
	}
}
/*
 * Send a notify IRQ to the remote partition that is associated with the
 * specified channel.
 */
static void
xpc_send_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
			char *chctl_flag_string, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	union xpc_channel_ctl_flags chctl = { 0 };
	enum xp_retval ret;

	if (likely(part->act_state != XPC_P_AS_DEACTIVATING)) {
		chctl.flags[ch->number] = chctl_flag;
		ret = xpc_send_IRQ_sn2(part_sn2->remote_chctl_amo_va,
				       chctl.all_flags,
				       part_sn2->notify_IRQ_nasid,
				       part_sn2->notify_IRQ_phys_cpuid,
				       SGI_XPC_NOTIFY);
		dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
			chctl_flag_string, ch->partid, ch->number, ret);
		if (unlikely(ret != xpSuccess)) {
			if (irq_flags != NULL)
				spin_unlock_irqrestore(&ch->lock, *irq_flags);
			XPC_DEACTIVATE_PARTITION(part, ret);
			if (irq_flags != NULL)
				spin_lock_irqsave(&ch->lock, *irq_flags);
		}
	}
}
#define XPC_SEND_NOTIFY_IRQ_SN2(_ch, _ipi_f, _irq_f) \
		xpc_send_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f, _irq_f)
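
/*
 * Usage note: the macro stringifies the chctl flag (#_ipi_f) so that
 * xpc_send_notify_IRQ_sn2() can log, e.g., "XPC_CHCTL_OPENREQUEST sent to
 * partid=..." without each caller passing a string by hand.
 */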
/*
 * Make it look like the remote partition, which is associated with the
 * specified channel, sent us a notify IRQ.  This faked IRQ will be handled
 * by xpc_check_for_dropped_notify_IRQ_sn2().
 */
static void
xpc_send_local_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
			      char *chctl_flag_string)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	union xpc_channel_ctl_flags chctl = { 0 };

	chctl.flags[ch->number] = chctl_flag;
	FETCHOP_STORE_OP(TO_AMO((u64)&part->sn.sn2.local_chctl_amo_va->
				variable), FETCHOP_OR, chctl.all_flags);
	dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
		chctl_flag_string, ch->partid, ch->number);
}
#define XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(_ch, _ipi_f) \
		xpc_send_local_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f)
static void
xpc_send_chctl_closerequest_sn2(struct xpc_channel *ch,
				unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;

	args->reason = ch->reason;
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREQUEST, irq_flags);
}
static void
xpc_send_chctl_closereply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
{
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREPLY, irq_flags);
}
static void
xpc_send_chctl_openrequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;

	args->entry_size = ch->entry_size;
	args->local_nentries = ch->local_nentries;
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREQUEST, irq_flags);
}
static void
xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args;

	args->remote_nentries = ch->remote_nentries;
	args->local_nentries = ch->local_nentries;
	args->local_msgqueue_pa = xp_pa(ch->sn.sn2.local_msgqueue);
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREPLY, irq_flags);
}
static void
xpc_send_chctl_msgrequest_sn2(struct xpc_channel *ch)
{
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST, NULL);
}
static void
xpc_send_chctl_local_msgrequest_sn2(struct xpc_channel *ch)
{
	XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST);
}
static enum xp_retval
xpc_save_remote_msgqueue_pa_sn2(struct xpc_channel *ch,
				unsigned long msgqueue_pa)
{
	ch->sn.sn2.remote_msgqueue_pa = msgqueue_pa;
	return xpSuccess;
}
/*
 * This next set of functions are used to keep track of when a partition is
 * potentially engaged in accessing memory belonging to another partition.
 */

static void
xpc_indicate_partition_engaged_sn2(struct xpc_partition *part)
{
	unsigned long irq_flags;
	struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
					     (XPC_ENGAGED_PARTITIONS_AMO_SN2 *
					     sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* set bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
			 BIT(sn_partition_id));

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor.  If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);
}
static void
xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	unsigned long irq_flags;
	struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
					     (XPC_ENGAGED_PARTITIONS_AMO_SN2 *
					     sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* clear bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
			 ~BIT(sn_partition_id));

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor.  If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);

	/*
	 * Send activate IRQ to get other side to see that we've cleared our
	 * bit in their engaged partitions amo.
	 */
	xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
				  cnodeid_to_nasid(0),
				  part_sn2->activate_IRQ_nasid,
				  part_sn2->activate_IRQ_phys_cpuid);
}
static void
xpc_assume_partition_disengaged_sn2(short partid)
{
	struct amo *amo = xpc_vars_sn2->amos_page +
			  XPC_ENGAGED_PARTITIONS_AMO_SN2;

	/* clear bit(s) based on partid mask in our partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
			 ~BIT(partid));
}
static int
xpc_partition_engaged_sn2(short partid)
{
	struct amo *amo = xpc_vars_sn2->amos_page +
			  XPC_ENGAGED_PARTITIONS_AMO_SN2;

	/* our partition's amo variable ANDed with partid mask */
	return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
		BIT(partid)) != 0;
}
static int
xpc_any_partition_engaged_sn2(void)
{
	struct amo *amo = xpc_vars_sn2->amos_page +
			  XPC_ENGAGED_PARTITIONS_AMO_SN2;

	/* our partition's amo variable */
	return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) != 0;
}
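
/*
 * Encoding note: each remote partition sets or clears BIT(its partid) in our
 * XPC_ENGAGED_PARTITIONS_AMO_SN2 variable, so a single fetchop load answers
 * both "is partition N engaged" and "is any partition engaged".
 */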
/* original protection values for each node */
static u64 xpc_prot_vec_sn2[MAX_NUMNODES];
/*
 * Change protections to allow amo operations on non-Shub 1.1 systems.
 */
static enum xp_retval
xpc_allow_amo_ops_sn2(struct amo *amos_page)
{
	enum xp_retval ret = xpSuccess;

	/*
	 * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST
	 * collides with memory operations.  On those systems we call
	 * xpc_allow_amo_ops_shub_wars_1_1_sn2() instead.
	 */
	if (!enable_shub_wars_1_1())
		ret = xp_expand_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE);
	return ret;
}
/*
 * Change protections to allow amo operations on Shub 1.1 systems.
 */
static void
xpc_allow_amo_ops_shub_wars_1_1_sn2(void)
{
	int node;
	int nasid;

	if (!enable_shub_wars_1_1())
		return;

	for_each_online_node(node) {
		nasid = cnodeid_to_nasid(node);
		/* save current protection values */
		xpc_prot_vec_sn2[node] =
		    (u64)HUB_L((u64 *)GLOBAL_MMR_ADDR(nasid,
						  SH1_MD_DQLP_MMR_DIR_PRIVEC0));
		/* open up everything */
		HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
					     SH1_MD_DQLP_MMR_DIR_PRIVEC0),
		      -1UL);
		HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
					     SH1_MD_DQRP_MMR_DIR_PRIVEC0),
		      -1UL);
	}
}
static enum xp_retval
xpc_get_partition_rsvd_page_pa_sn2(void *buf, u64 *cookie, unsigned long *rp_pa,
				   size_t *len)
{
	s64 status;
	enum xp_retval ret;

	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

	return ret;
}
static int
xpc_setup_rsvd_page_sn_sn2(struct xpc_rsvd_page *rp)
{
	struct amo *amos_page;
	int i;
	enum xp_retval ret;

	xpc_vars_sn2 = XPC_RP_VARS(rp);

	rp->sn.vars_pa = xp_pa(xpc_vars_sn2);

	/* vars_part array follows immediately after vars */
	xpc_vars_part_sn2 = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) +
							 XPC_RP_VARS_SIZE);

	/*
	 * Before clearing xpc_vars_sn2, see if a page of amos had been
	 * previously allocated.  If not we'll need to allocate one and set
	 * permissions so that cross-partition amos are allowed.
	 *
	 * The allocated amo page needs MCA reporting to remain disabled after
	 * XPC has unloaded.  To make this work, we keep a copy of the pointer
	 * to this page (i.e., amos_page) in the struct xpc_vars_sn2 structure,
	 * which is pointed to by the reserved page, and re-use that saved copy
	 * on subsequent loads of XPC.  This amo page is never freed, and its
	 * memory protections are never restricted.
	 */
	amos_page = xpc_vars_sn2->amos_page;
	if (amos_page == NULL) {
		amos_page = (struct amo *)TO_AMO(uncached_alloc_page(0, 1));
		if (amos_page == NULL) {
			dev_err(xpc_part, "can't allocate page of amos\n");
			return -ENOMEM;
		}

		/*
		 * Open up amo-R/W to cpu.  This is done on Shub 1.1 systems
		 * when xpc_allow_amo_ops_shub_wars_1_1_sn2() is called.
		 */
		ret = xpc_allow_amo_ops_sn2(amos_page);
		if (ret != xpSuccess) {
			dev_err(xpc_part, "can't allow amo operations\n");
			uncached_free_page(__IA64_UNCACHED_OFFSET |
					   TO_PHYS((u64)amos_page), 1);
			return -EPERM;
		}
	}

	/* clear xpc_vars_sn2 */
	memset(xpc_vars_sn2, 0, sizeof(struct xpc_vars_sn2));

	xpc_vars_sn2->version = XPC_V_VERSION;
	xpc_vars_sn2->activate_IRQ_nasid = cpuid_to_nasid(0);
	xpc_vars_sn2->activate_IRQ_phys_cpuid = cpu_physical_id(0);
	xpc_vars_sn2->vars_part_pa = xp_pa(xpc_vars_part_sn2);
	xpc_vars_sn2->amos_page_pa = ia64_tpa((u64)amos_page);
	xpc_vars_sn2->amos_page = amos_page;	/* save for next load of XPC */

	/* clear xpc_vars_part_sn2 */
	memset((u64 *)xpc_vars_part_sn2, 0, sizeof(struct xpc_vars_part_sn2) *
	       XP_MAX_NPARTITIONS_SN2);

	/* initialize the activate IRQ related amo variables */
	for (i = 0; i < xpc_nasid_mask_nlongs; i++)
		(void)xpc_init_IRQ_amo_sn2(XPC_ACTIVATE_IRQ_AMOS_SN2 + i);

	/* initialize the engaged remote partitions related amo variables */
	(void)xpc_init_IRQ_amo_sn2(XPC_ENGAGED_PARTITIONS_AMO_SN2);
	(void)xpc_init_IRQ_amo_sn2(XPC_DEACTIVATE_REQUEST_AMO_SN2);

	return 0;
}
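
/*
 * Reserved-page layout note (as used above): XPC_RP_VARS(rp) locates
 * xpc_vars_sn2 within the reserved page, rp->sn.vars_pa advertises it to
 * remote partitions, and the xpc_vars_part_sn2 array sits immediately after
 * the vars area (at offset XPC_RP_VARS_SIZE).
 */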
static void
xpc_increment_heartbeat_sn2(void)
{
	xpc_vars_sn2->heartbeat++;
}
static void
xpc_offline_heartbeat_sn2(void)
{
	xpc_increment_heartbeat_sn2();
	xpc_vars_sn2->heartbeat_offline = 1;
}
static void
xpc_online_heartbeat_sn2(void)
{
	xpc_increment_heartbeat_sn2();
	xpc_vars_sn2->heartbeat_offline = 0;
}
static void
xpc_heartbeat_init_sn2(void)
{
	DBUG_ON(xpc_vars_sn2 == NULL);

	bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2);
	xpc_heartbeating_to_mask = &xpc_vars_sn2->heartbeating_to_mask[0];
	xpc_online_heartbeat_sn2();
}
static void
xpc_heartbeat_exit_sn2(void)
{
	xpc_offline_heartbeat_sn2();
}
static enum xp_retval
xpc_get_remote_heartbeat_sn2(struct xpc_partition *part)
{
	struct xpc_vars_sn2 *remote_vars;
	enum xp_retval ret;

	remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2;

	/* pull the remote vars structure that contains the heartbeat */
	ret = xp_remote_memcpy(xp_pa(remote_vars),
			       part->sn.sn2.remote_vars_pa,
			       XPC_RP_VARS_SIZE);
	if (ret != xpSuccess)
		return ret;

	dev_dbg(xpc_part, "partid=%d, heartbeat=%ld, last_heartbeat=%ld, "
		"heartbeat_offline=%ld, HB_mask[0]=0x%lx\n", XPC_PARTID(part),
		remote_vars->heartbeat, part->last_heartbeat,
		remote_vars->heartbeat_offline,
		remote_vars->heartbeating_to_mask[0]);

	if ((remote_vars->heartbeat == part->last_heartbeat &&
	    remote_vars->heartbeat_offline == 0) ||
	    !xpc_hb_allowed(sn_partition_id,
			    &remote_vars->heartbeating_to_mask)) {
		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = remote_vars->heartbeat;
	}

	return ret;
}
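
/*
 * Detection rule implemented above: the remote partition is deemed to have
 * no heartbeat if its count failed to advance while it was not marked
 * offline, or if it stopped heartbeating to us; otherwise last_heartbeat is
 * refreshed for the next check.
 */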
/*
 * Get a copy of the remote partition's XPC variables from the reserved page.
 *
 * remote_vars points to a buffer that is cacheline aligned for BTE copies and
 * assumed to be of size XPC_RP_VARS_SIZE.
 */
static enum xp_retval
xpc_get_remote_vars_sn2(unsigned long remote_vars_pa,
			struct xpc_vars_sn2 *remote_vars)
{
	enum xp_retval ret;

	if (remote_vars_pa == 0)
		return xpVarsNotSet;

	/* pull over the cross partition variables */
	ret = xp_remote_memcpy(xp_pa(remote_vars), remote_vars_pa,
			       XPC_RP_VARS_SIZE);
	if (ret != xpSuccess)
		return ret;

	if (XPC_VERSION_MAJOR(remote_vars->version) !=
	    XPC_VERSION_MAJOR(XPC_V_VERSION)) {
		return xpBadVersion;
	}

	return xpSuccess;
}
static void
xpc_request_partition_activation_sn2(struct xpc_rsvd_page *remote_rp,
				     unsigned long remote_rp_pa, int nasid)
{
	xpc_send_local_activate_IRQ_sn2(nasid);
}
static void
xpc_request_partition_reactivation_sn2(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_sn2(part->sn.sn2.activate_IRQ_nasid);
}
static void
xpc_request_partition_deactivation_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	unsigned long irq_flags;
	struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
					     (XPC_DEACTIVATE_REQUEST_AMO_SN2 *
					     sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* set bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
			 BIT(sn_partition_id));

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor.  If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);

	/*
	 * Send activate IRQ to get other side to see that we've set our
	 * bit in their deactivate request amo.
	 */
	xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
				  cnodeid_to_nasid(0),
				  part_sn2->activate_IRQ_nasid,
				  part_sn2->activate_IRQ_phys_cpuid);
}
static void
xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part)
{
	unsigned long irq_flags;
	struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
					     (XPC_DEACTIVATE_REQUEST_AMO_SN2 *
					     sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* clear bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
			 ~BIT(sn_partition_id));

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor.  If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);
}
static int
xpc_partition_deactivation_requested_sn2(short partid)
{
	struct amo *amo = xpc_vars_sn2->amos_page +
			  XPC_DEACTIVATE_REQUEST_AMO_SN2;

	/* our partition's amo variable ANDed with partid mask */
	return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
		BIT(partid)) != 0;
}
/*
 * Update the remote partition's info.
 */
static void
xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
			      unsigned long *remote_rp_ts_jiffies,
			      unsigned long remote_rp_pa,
			      unsigned long remote_vars_pa,
			      struct xpc_vars_sn2 *remote_vars)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;

	part->remote_rp_version = remote_rp_version;
	dev_dbg(xpc_part, "  remote_rp_version = 0x%016x\n",
		part->remote_rp_version);

	part->remote_rp_ts_jiffies = *remote_rp_ts_jiffies;
	dev_dbg(xpc_part, "  remote_rp_ts_jiffies = 0x%016lx\n",
		part->remote_rp_ts_jiffies);

	part->remote_rp_pa = remote_rp_pa;
	dev_dbg(xpc_part, "  remote_rp_pa = 0x%016lx\n", part->remote_rp_pa);

	part_sn2->remote_vars_pa = remote_vars_pa;
	dev_dbg(xpc_part, "  remote_vars_pa = 0x%016lx\n",
		part_sn2->remote_vars_pa);

	part->last_heartbeat = remote_vars->heartbeat - 1;
	dev_dbg(xpc_part, "  last_heartbeat = 0x%016lx\n",
		part->last_heartbeat);

	part_sn2->remote_vars_part_pa = remote_vars->vars_part_pa;
	dev_dbg(xpc_part, "  remote_vars_part_pa = 0x%016lx\n",
		part_sn2->remote_vars_part_pa);

	part_sn2->activate_IRQ_nasid = remote_vars->activate_IRQ_nasid;
	dev_dbg(xpc_part, "  activate_IRQ_nasid = 0x%x\n",
		part_sn2->activate_IRQ_nasid);

	part_sn2->activate_IRQ_phys_cpuid =
	    remote_vars->activate_IRQ_phys_cpuid;
	dev_dbg(xpc_part, "  activate_IRQ_phys_cpuid = 0x%x\n",
		part_sn2->activate_IRQ_phys_cpuid);

	part_sn2->remote_amos_page_pa = remote_vars->amos_page_pa;
	dev_dbg(xpc_part, "  remote_amos_page_pa = 0x%lx\n",
		part_sn2->remote_amos_page_pa);

	part_sn2->remote_vars_version = remote_vars->version;
	dev_dbg(xpc_part, "  remote_vars_version = 0x%x\n",
		part_sn2->remote_vars_version);
}
/*
 * Prior code has determined the nasid which generated an activate IRQ.
 * Inspect that nasid to determine if its partition needs to be activated
 * or deactivated.
 *
 * A partition is considered "awaiting activation" if our partition
 * flags indicate it is not active and it has a heartbeat.  A
 * partition is considered "awaiting deactivation" if our partition
 * flags indicate it is active but it has no heartbeat or it is not
 * sending its heartbeat to us.
 *
 * To determine the heartbeat, the remote nasid must have a properly
 * initialized reserved page.
 */
static void
xpc_identify_activate_IRQ_req_sn2(int nasid)
{
	struct xpc_rsvd_page *remote_rp;
	struct xpc_vars_sn2 *remote_vars;
	unsigned long remote_rp_pa;
	unsigned long remote_vars_pa;
	int remote_rp_version;
	int reactivate = 0;
	unsigned long remote_rp_ts_jiffies = 0;
	short partid;
	struct xpc_partition *part;
	struct xpc_partition_sn2 *part_sn2;
	enum xp_retval ret;

	/* pull over the reserved page structure */

	remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer_sn2;

	ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
	if (ret != xpSuccess) {
		dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
			 "which sent interrupt, reason=%d\n", nasid, ret);
		return;
	}

	remote_vars_pa = remote_rp->sn.vars_pa;
	remote_rp_version = remote_rp->version;
	remote_rp_ts_jiffies = remote_rp->ts_jiffies;

	partid = remote_rp->SAL_partid;
	part = &xpc_partitions[partid];
	part_sn2 = &part->sn.sn2;

	/* pull over the cross partition variables */

	remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2;

	ret = xpc_get_remote_vars_sn2(remote_vars_pa, remote_vars);
	if (ret != xpSuccess) {
		dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
			 "which sent interrupt, reason=%d\n", nasid, ret);

		XPC_DEACTIVATE_PARTITION(part, ret);
		return;
	}

	part->activate_IRQ_rcvd++;

	dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
		"%ld:0x%lx\n", (int)nasid, (int)partid, part->activate_IRQ_rcvd,
		remote_vars->heartbeat, remote_vars->heartbeating_to_mask[0]);

	if (xpc_partition_disengaged(part) &&
	    part->act_state == XPC_P_AS_INACTIVE) {

		xpc_update_partition_info_sn2(part, remote_rp_version,
					      &remote_rp_ts_jiffies,
					      remote_rp_pa, remote_vars_pa,
					      remote_vars);

		if (xpc_partition_deactivation_requested_sn2(partid)) {
			/*
			 * Other side is waiting on us to deactivate even though
			 * we already have.
			 */
			return;
		}

		xpc_activate_partition(part);
		return;
	}

	DBUG_ON(part->remote_rp_version == 0);
	DBUG_ON(part_sn2->remote_vars_version == 0);

	if (remote_rp_ts_jiffies != part->remote_rp_ts_jiffies) {

		/* the other side rebooted */

		DBUG_ON(xpc_partition_engaged_sn2(partid));
		DBUG_ON(xpc_partition_deactivation_requested_sn2(partid));

		xpc_update_partition_info_sn2(part, remote_rp_version,
					      &remote_rp_ts_jiffies,
					      remote_rp_pa, remote_vars_pa,
					      remote_vars);
		reactivate = 1;
	}

	if (part->disengage_timeout > 0 && !xpc_partition_disengaged(part)) {
		/* still waiting on other side to disengage from us */
		return;
	}

	if (reactivate)
		XPC_DEACTIVATE_PARTITION(part, xpReactivating);
	else if (xpc_partition_deactivation_requested_sn2(partid))
		XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown);
}
/*
 * Loop through the activation amo variables and process any bits
 * which are set.  Each bit indicates a nasid sending a partition
 * activation or deactivation request.
 *
 * Return #of IRQs detected.
 */
static int
xpc_identify_activate_IRQ_sender_sn2(void)
{
	int l;
	int b;
	unsigned long nasid_mask_long;
	u64 nasid;		/* remote nasid */
	int n_IRQs_detected = 0;
	struct amo *act_amos;

	act_amos = xpc_vars_sn2->amos_page + XPC_ACTIVATE_IRQ_AMOS_SN2;

	/* scan through activate amo variables looking for non-zero entries */
	for (l = 0; l < xpc_nasid_mask_nlongs; l++) {

		if (xpc_exiting)
			break;

		nasid_mask_long = xpc_receive_IRQ_amo_sn2(&act_amos[l]);

		b = find_first_bit(&nasid_mask_long, BITS_PER_LONG);
		if (b >= BITS_PER_LONG) {
			/* no IRQs from nasids in this amo variable */
			continue;
		}

		dev_dbg(xpc_part, "amo[%d] gave back 0x%lx\n", l,
			nasid_mask_long);

		/*
		 * If this nasid has been added to the machine since
		 * our partition was reset, this will retain the
		 * remote nasid in our reserved pages machine mask.
		 * This is used in the event of module reload.
		 */
		xpc_mach_nasids[l] |= nasid_mask_long;

		/* locate the nasid(s) which sent interrupts */

		do {
			n_IRQs_detected++;
			nasid = (l * BITS_PER_LONG + b) * 2;
			dev_dbg(xpc_part, "interrupt from nasid %ld\n", nasid);
			xpc_identify_activate_IRQ_req_sn2(nasid);

			b = find_next_bit(&nasid_mask_long, BITS_PER_LONG,
					  b + 1);
		} while (b < BITS_PER_LONG);
	}
	return n_IRQs_detected;
}
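
/*
 * Decoding note: bit b of activate amo word l corresponds to a sender whose
 * (even, C-brick) nasid is (l * BITS_PER_LONG + b) * 2, the inverse of the
 * BIT_WORD()/BIT_MASK() encoding used when the IRQ was sent.
 */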
static void
xpc_process_activate_IRQ_rcvd_sn2(void)
{
	unsigned long irq_flags;
	int n_IRQs_expected;
	int n_IRQs_detected;

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	n_IRQs_expected = xpc_activate_IRQ_rcvd;
	xpc_activate_IRQ_rcvd = 0;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	n_IRQs_detected = xpc_identify_activate_IRQ_sender_sn2();
	if (n_IRQs_detected < n_IRQs_expected) {
		/* retry once to help avoid missing amo */
		(void)xpc_identify_activate_IRQ_sender_sn2();
	}
}
/*
 * Setup the channel structures that are sn2 specific.
 */
static enum xp_retval
xpc_setup_ch_structures_sn_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	struct xpc_channel_sn2 *ch_sn2;
	enum xp_retval retval;
	int ret;
	int cpuid;
	int ch_number;
	struct timer_list *timer;
	short partid = XPC_PARTID(part);

	/* allocate all the required GET/PUT values */

	part_sn2->local_GPs =
	    xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL,
					  &part_sn2->local_GPs_base);
	if (part_sn2->local_GPs == NULL) {
		dev_err(xpc_chan, "can't get memory for local get/put "
			"values\n");
		return xpNoMemory;
	}

	part_sn2->remote_GPs =
	    xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL,
					  &part_sn2->remote_GPs_base);
	if (part_sn2->remote_GPs == NULL) {
		dev_err(xpc_chan, "can't get memory for remote get/put "
			"values\n");
		retval = xpNoMemory;
		goto out_1;
	}

	part_sn2->remote_GPs_pa = 0;

	/* allocate all the required open and close args */

	part_sn2->local_openclose_args =
	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE,
					  GFP_KERNEL, &part_sn2->
					  local_openclose_args_base);
	if (part_sn2->local_openclose_args == NULL) {
		dev_err(xpc_chan, "can't get memory for local connect args\n");
		retval = xpNoMemory;
		goto out_2;
	}

	part_sn2->remote_openclose_args_pa = 0;

	part_sn2->local_chctl_amo_va = xpc_init_IRQ_amo_sn2(partid);

	part_sn2->notify_IRQ_nasid = 0;
	part_sn2->notify_IRQ_phys_cpuid = 0;
	part_sn2->remote_chctl_amo_va = NULL;

	sprintf(part_sn2->notify_IRQ_owner, "xpc%02d", partid);
	ret = request_irq(SGI_XPC_NOTIFY, xpc_handle_notify_IRQ_sn2,
			  IRQF_SHARED, part_sn2->notify_IRQ_owner,
			  (void *)(u64)partid);
	if (ret != 0) {
		dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
			"errno=%d\n", -ret);
		retval = xpLackOfResources;
		goto out_3;
	}

	/* Setup a timer to check for dropped notify IRQs */
	timer = &part_sn2->dropped_notify_IRQ_timer;
	init_timer(timer);
	timer->function =
	    (void (*)(unsigned long))xpc_check_for_dropped_notify_IRQ_sn2;
	timer->data = (unsigned long)part;
	timer->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
	add_timer(timer);

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_sn2 = &part->channels[ch_number].sn.sn2;

		ch_sn2->local_GP = &part_sn2->local_GPs[ch_number];
		ch_sn2->local_openclose_args =
		    &part_sn2->local_openclose_args[ch_number];

		mutex_init(&ch_sn2->msg_to_pull_mutex);
	}

	/*
	 * Setup the per partition specific variables required by the
	 * remote partition to establish channel connections with us.
	 *
	 * The setting of the magic # indicates that these per partition
	 * specific variables are ready to be used.
	 */
	xpc_vars_part_sn2[partid].GPs_pa = xp_pa(part_sn2->local_GPs);
	xpc_vars_part_sn2[partid].openclose_args_pa =
	    xp_pa(part_sn2->local_openclose_args);
	xpc_vars_part_sn2[partid].chctl_amo_pa =
	    xp_pa(part_sn2->local_chctl_amo_va);
	cpuid = raw_smp_processor_id();	/* any CPU in this partition will do */
	xpc_vars_part_sn2[partid].notify_IRQ_nasid = cpuid_to_nasid(cpuid);
	xpc_vars_part_sn2[partid].notify_IRQ_phys_cpuid =
	    cpu_physical_id(cpuid);
	xpc_vars_part_sn2[partid].nchannels = part->nchannels;
	xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC1_SN2;

	return xpSuccess;

	/* setup of ch structures failed */
out_3:
	kfree(part_sn2->local_openclose_args_base);
	part_sn2->local_openclose_args = NULL;
out_2:
	kfree(part_sn2->remote_GPs_base);
	part_sn2->remote_GPs = NULL;
out_1:
	kfree(part_sn2->local_GPs_base);
	part_sn2->local_GPs = NULL;
	return retval;
}
/*
 * Teardown the channel structures that are sn2 specific.
 */
static void
xpc_teardown_ch_structures_sn_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	short partid = XPC_PARTID(part);

	/*
	 * Indicate that the variables specific to the remote partition are no
	 * longer available for its use.
	 */
	xpc_vars_part_sn2[partid].magic = 0;

	/* in case we've still got outstanding timers registered... */
	del_timer_sync(&part_sn2->dropped_notify_IRQ_timer);
	free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);

	kfree(part_sn2->local_openclose_args_base);
	part_sn2->local_openclose_args = NULL;
	kfree(part_sn2->remote_GPs_base);
	part_sn2->remote_GPs = NULL;
	kfree(part_sn2->local_GPs_base);
	part_sn2->local_GPs = NULL;
	part_sn2->local_chctl_amo_va = NULL;
}
/*
 * Create a wrapper that hides the underlying mechanism for pulling a cacheline
 * (or multiple cachelines) from a remote partition.
 *
 * src_pa must be a cacheline aligned physical address on the remote partition.
 * dst must be a cacheline aligned virtual address on this partition.
 * cnt must be cacheline sized
 */
/* ??? Replace this function by call to xp_remote_memcpy() or bte_copy()? */
static enum xp_retval
xpc_pull_remote_cachelines_sn2(struct xpc_partition *part, void *dst,
			       const unsigned long src_pa, size_t cnt)
{
	enum xp_retval ret;

	DBUG_ON(src_pa != L1_CACHE_ALIGN(src_pa));
	DBUG_ON((unsigned long)dst != L1_CACHE_ALIGN((unsigned long)dst));
	DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));

	if (part->act_state == XPC_P_AS_DEACTIVATING)
		return part->reason;

	ret = xp_remote_memcpy(xp_pa(dst), src_pa, cnt);
	if (ret != xpSuccess) {
		dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed,"
			" ret=%d\n", XPC_PARTID(part), ret);
	}
	return ret;
}
/*
 * Pull the remote per partition specific variables from the specified
 * partition.
 */
static enum xp_retval
xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	u8 buffer[L1_CACHE_BYTES * 2];
	struct xpc_vars_part_sn2 *pulled_entry_cacheline =
	    (struct xpc_vars_part_sn2 *)L1_CACHE_ALIGN((u64)buffer);
	struct xpc_vars_part_sn2 *pulled_entry;
	unsigned long remote_entry_cacheline_pa;
	unsigned long remote_entry_pa;
	short partid = XPC_PARTID(part);
	enum xp_retval ret;

	/* pull the cacheline that contains the variables we're interested in */

	DBUG_ON(part_sn2->remote_vars_part_pa !=
		L1_CACHE_ALIGN(part_sn2->remote_vars_part_pa));
	DBUG_ON(sizeof(struct xpc_vars_part_sn2) != L1_CACHE_BYTES / 2);

	remote_entry_pa = part_sn2->remote_vars_part_pa +
	    sn_partition_id * sizeof(struct xpc_vars_part_sn2);

	remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));

	pulled_entry = (struct xpc_vars_part_sn2 *)((u64)pulled_entry_cacheline
						    + (remote_entry_pa &
						    (L1_CACHE_BYTES - 1)));

	ret = xpc_pull_remote_cachelines_sn2(part, pulled_entry_cacheline,
					     remote_entry_cacheline_pa,
					     L1_CACHE_BYTES);
	if (ret != xpSuccess) {
		dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
			"partition %d, ret=%d\n", partid, ret);
		return ret;
	}

	/* see if they've been set up yet */

	if (pulled_entry->magic != XPC_VP_MAGIC1_SN2 &&
	    pulled_entry->magic != XPC_VP_MAGIC2_SN2) {

		if (pulled_entry->magic != 0) {
			dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
				"partition %d has bad magic value (=0x%lx)\n",
				partid, sn_partition_id, pulled_entry->magic);
			return xpBadMagic;
		}

		/* they've not been initialized yet */
		return xpRetry;
	}

	if (xpc_vars_part_sn2[partid].magic == XPC_VP_MAGIC1_SN2) {

		/* validate the variables */

		if (pulled_entry->GPs_pa == 0 ||
		    pulled_entry->openclose_args_pa == 0 ||
		    pulled_entry->chctl_amo_pa == 0) {

			dev_err(xpc_chan, "partition %d's XPC vars_part for "
				"partition %d are not valid\n", partid,
				sn_partition_id);
			return xpInvalidAddress;
		}

		/* the variables we imported look to be valid */

		part_sn2->remote_GPs_pa = pulled_entry->GPs_pa;
		part_sn2->remote_openclose_args_pa =
		    pulled_entry->openclose_args_pa;
		part_sn2->remote_chctl_amo_va =
		    (struct amo *)__va(pulled_entry->chctl_amo_pa);
		part_sn2->notify_IRQ_nasid = pulled_entry->notify_IRQ_nasid;
		part_sn2->notify_IRQ_phys_cpuid =
		    pulled_entry->notify_IRQ_phys_cpuid;

		if (part->nchannels > pulled_entry->nchannels)
			part->nchannels = pulled_entry->nchannels;

		/* let the other side know that we've pulled their variables */

		xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC2_SN2;
	}

	if (pulled_entry->magic == XPC_VP_MAGIC1_SN2)
		return xpRetry;

	return xpSuccess;
}
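
/*
 * Handshake summary: a partition sets XPC_VP_MAGIC1_SN2 once its vars_part
 * entry is initialized and advances it to XPC_VP_MAGIC2_SN2 once it has
 * pulled the other side's entry; xpRetry is returned above until the remote
 * side reaches MAGIC2 as well (see xpc_make_first_contact_sn2() below).
 */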
/*
 * Establish first contact with the remote partition.  This involves pulling
 * the XPC per partition variables from the remote partition and waiting for
 * the remote partition to pull ours.
 */
static enum xp_retval
xpc_make_first_contact_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	enum xp_retval ret;

	/*
	 * Register the remote partition's amos with SAL so it can handle
	 * and cleanup errors within that address range should the remote
	 * partition go down.  We don't unregister this range because it is
	 * difficult to tell when outstanding writes to the remote partition
	 * are finished and thus when it is safe to unregister.  This should
	 * not result in wasted space in the SAL xp_addr_region table because
	 * we should get the same page for remote_amos_page_pa after module
	 * reloads and system reboots.
	 */
	if (sn_register_xp_addr_region(part_sn2->remote_amos_page_pa,
				       PAGE_SIZE, 1) < 0) {
		dev_warn(xpc_part, "xpc_activating(%d) failed to register "
			 "xp_addr region\n", XPC_PARTID(part));

		ret = xpPhysAddrRegFailed;
		XPC_DEACTIVATE_PARTITION(part, ret);
		return ret;
	}

	/*
	 * Send activate IRQ to get other side to activate if they've not
	 * already begun to do so.
	 */
	xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
				  cnodeid_to_nasid(0),
				  part_sn2->activate_IRQ_nasid,
				  part_sn2->activate_IRQ_phys_cpuid);

	while ((ret = xpc_pull_remote_vars_part_sn2(part)) != xpSuccess) {
		if (ret != xpRetry) {
			XPC_DEACTIVATE_PARTITION(part, ret);
			return ret;
		}

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}
/*
 * Get the chctl flags and pull the openclose args and/or remote GPs as needed.
 */
static u64
xpc_get_chctl_all_flags_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;
	enum xp_retval ret;

	/*
	 * See if there are any chctl flags to be handled.
	 */

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	if (xpc_any_openclose_chctl_flags_set(&chctl)) {
		ret = xpc_pull_remote_cachelines_sn2(part, part->
						     remote_openclose_args,
						     part_sn2->
						     remote_openclose_args_pa,
						     XPC_OPENCLOSE_ARGS_SIZE);
		if (ret != xpSuccess) {
			XPC_DEACTIVATE_PARTITION(part, ret);

			dev_dbg(xpc_chan, "failed to pull openclose args from "
				"partition %d, ret=%d\n", XPC_PARTID(part),
				ret);

			/* don't bother processing chctl flags anymore */
			chctl.all_flags = 0;
		}
	}

	if (xpc_any_msg_chctl_flags_set(&chctl)) {
		ret = xpc_pull_remote_cachelines_sn2(part, part_sn2->remote_GPs,
						     part_sn2->remote_GPs_pa,
						     XPC_GP_SIZE);
		if (ret != xpSuccess) {
			XPC_DEACTIVATE_PARTITION(part, ret);

			dev_dbg(xpc_chan, "failed to pull GPs from partition "
				"%d, ret=%d\n", XPC_PARTID(part), ret);

			/* don't bother processing chctl flags anymore */
			chctl.all_flags = 0;
		}
	}

	return chctl.all_flags;
}
/*
 * Allocate the local message queue and the notify queue.
 */
static enum xp_retval
xpc_allocate_local_msgqueue_sn2(struct xpc_channel *ch)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	unsigned long irq_flags;
	int nentries;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {

		nbytes = nentries * ch->entry_size;
		ch_sn2->local_msgqueue =
		    xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL,
						  &ch_sn2->local_msgqueue_base);
		if (ch_sn2->local_msgqueue == NULL)
			continue;

		nbytes = nentries * sizeof(struct xpc_notify_sn2);
		ch_sn2->notify_queue = kzalloc(nbytes, GFP_KERNEL);
		if (ch_sn2->notify_queue == NULL) {
			kfree(ch_sn2->local_msgqueue_base);
			ch_sn2->local_msgqueue = NULL;
			continue;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries) {
			dev_dbg(xpc_chan, "nentries=%d local_nentries=%d, "
				"partid=%d, channel=%d\n", nentries,
				ch->local_nentries, ch->partid, ch->number);

			ch->local_nentries = nentries;
		}
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	dev_dbg(xpc_chan, "can't get memory for local message queue and notify "
		"queue, partid=%d, channel=%d\n", ch->partid, ch->number);
	return xpNoMemory;
}
/*
 * Allocate the cached remote message queue.
 */
static enum xp_retval
xpc_allocate_remote_msgqueue_sn2(struct xpc_channel *ch)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	unsigned long irq_flags;
	int nentries;
	size_t nbytes;

	DBUG_ON(ch->remote_nentries <= 0);

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {

		nbytes = nentries * ch->entry_size;
		ch_sn2->remote_msgqueue =
		    xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL, &ch_sn2->
						  remote_msgqueue_base);
		if (ch_sn2->remote_msgqueue == NULL)
			continue;

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "nentries=%d remote_nentries=%d, "
				"partid=%d, channel=%d\n", nentries,
				ch->remote_nentries, ch->partid, ch->number);

			ch->remote_nentries = nentries;
		}
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	dev_dbg(xpc_chan, "can't get memory for cached remote message queue, "
		"partid=%d, channel=%d\n", ch->partid, ch->number);
	return xpNoMemory;
}
/*
 * Allocate message queues and other stuff associated with a channel.
 *
 * Note: Assumes all of the channel sizes are filled in.
 */
static enum xp_retval
xpc_setup_msg_structures_sn2(struct xpc_channel *ch)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	enum xp_retval ret;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ret = xpc_allocate_local_msgqueue_sn2(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_remote_msgqueue_sn2(ch);
		if (ret != xpSuccess) {
			kfree(ch_sn2->local_msgqueue_base);
			ch_sn2->local_msgqueue = NULL;
			kfree(ch_sn2->notify_queue);
			ch_sn2->notify_queue = NULL;
		}
	}
	return ret;
}
/*
 * Free up message queues and other stuff that were allocated for the specified
 * channel.
 */
static void
xpc_teardown_msg_structures_sn2(struct xpc_channel *ch)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;

	DBUG_ON(!spin_is_locked(&ch->lock));

	ch_sn2->remote_msgqueue_pa = 0;

	ch_sn2->local_GP->get = 0;
	ch_sn2->local_GP->put = 0;
	ch_sn2->remote_GP.get = 0;
	ch_sn2->remote_GP.put = 0;
	ch_sn2->w_local_GP.get = 0;
	ch_sn2->w_local_GP.put = 0;
	ch_sn2->w_remote_GP.get = 0;
	ch_sn2->w_remote_GP.put = 0;
	ch_sn2->next_msg_to_pull = 0;

	if (ch->flags & XPC_C_SETUP) {
		dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n",
			ch->flags, ch->partid, ch->number);

		kfree(ch_sn2->local_msgqueue_base);
		ch_sn2->local_msgqueue = NULL;
		kfree(ch_sn2->remote_msgqueue_base);
		ch_sn2->remote_msgqueue = NULL;
		kfree(ch_sn2->notify_queue);
		ch_sn2->notify_queue = NULL;
	}
}
/*
 * Notify those who wanted to be notified upon delivery of their message.
 */
static void
xpc_notify_senders_sn2(struct xpc_channel *ch, enum xp_retval reason, s64 put)
{
	struct xpc_notify_sn2 *notify;
	u8 notify_type;
	s64 get = ch->sn.sn2.w_remote_GP.get - 1;

	while (++get < put && atomic_read(&ch->n_to_notify) > 0) {

		notify = &ch->sn.sn2.notify_queue[get % ch->local_nentries];

		/*
		 * See if the notify entry indicates it was associated with
		 * a message whose sender wants to be notified.  It is possible
		 * that it is, but someone else is doing or has done the
		 * notifying.
		 */
		notify_type = notify->type;
		if (notify_type == 0 ||
		    cmpxchg(&notify->type, notify_type, 0) != notify_type) {
			continue;
		}

		DBUG_ON(notify_type != XPC_N_CALL);

		atomic_dec(&ch->n_to_notify);

		if (notify->func != NULL) {
			dev_dbg(xpc_chan, "notify->func() called, notify=0x%p "
				"msg_number=%ld partid=%d channel=%d\n",
				(void *)notify, get, ch->partid, ch->number);

			notify->func(reason, ch->partid, ch->number,
				     notify->key);

			dev_dbg(xpc_chan, "notify->func() returned, notify=0x%p"
				" msg_number=%ld partid=%d channel=%d\n",
				(void *)notify, get, ch->partid, ch->number);
		}
	}
}
static void
xpc_notify_senders_of_disconnect_sn2(struct xpc_channel *ch)
{
	xpc_notify_senders_sn2(ch, ch->reason, ch->sn.sn2.w_local_GP.put);
}
/*
 * Clear some of the msg flags in the local message queue.
 */
static inline void
xpc_clear_local_msgqueue_flags_sn2(struct xpc_channel *ch)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg_sn2 *msg;
	s64 get;

	get = ch_sn2->w_remote_GP.get;
	do {
		msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue +
					     (get % ch->local_nentries) *
					     ch->entry_size);
		DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
		msg->flags = 0;
	} while (++get < ch_sn2->remote_GP.get);
}
/*
 * Clear some of the msg flags in the remote message queue.
 */
static inline void
xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg_sn2 *msg;
	s64 put, remote_nentries = ch->remote_nentries;

	/* flags are zeroed when the buffer is allocated */
	if (ch_sn2->remote_GP.put < remote_nentries)
		return;

	put = max(ch_sn2->w_remote_GP.put, remote_nentries);
	do {
		msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue +
					     (put % remote_nentries) *
					     ch->entry_size);
		DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
		DBUG_ON(!(msg->flags & XPC_M_SN2_DONE));
		DBUG_ON(msg->number != put - remote_nentries);
		msg->flags = 0;
	} while (++put < ch_sn2->remote_GP.put);
}
static int
xpc_n_of_deliverable_payloads_sn2(struct xpc_channel *ch)
{
	return ch->sn.sn2.w_remote_GP.put - ch->sn.sn2.w_local_GP.get;
}
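
/*
 * GET/PUT arithmetic: the GP values increase monotonically and are reduced
 * modulo the queue size only when indexing, so w_remote_GP.put minus
 * w_local_GP.get is exactly the number of sent-but-undelivered payloads.
 */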
static void
xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	int npayloads_sent;

	ch_sn2->remote_GP = part->sn.sn2.remote_GPs[ch_number];

	/* See what, if anything, has changed for each connected channel */

	xpc_msgqueue_ref(ch);

	if (ch_sn2->w_remote_GP.get == ch_sn2->remote_GP.get &&
	    ch_sn2->w_remote_GP.put == ch_sn2->remote_GP.put) {
		/* nothing changed since GPs were last pulled */
		xpc_msgqueue_deref(ch);
		return;
	}

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/*
	 * First check to see if messages recently sent by us have been
	 * received by the other side.  (The remote GET value will have
	 * changed since we last looked at it.)
	 */

	if (ch_sn2->w_remote_GP.get != ch_sn2->remote_GP.get) {

		/*
		 * We need to notify any senders that want to be notified
		 * that their sent messages have been received by their
		 * intended recipients.  We need to do this before updating
		 * w_remote_GP.get so that we don't allocate the same message
		 * queue entries prematurely (see xpc_allocate_msg()).
		 */
		if (atomic_read(&ch->n_to_notify) > 0) {
			/*
			 * Notify senders that messages sent have been
			 * received and delivered by the other side.
			 */
			xpc_notify_senders_sn2(ch, xpMsgDelivered,
					       ch_sn2->remote_GP.get);
		}

		/*
		 * Clear msg->flags in previously sent messages, so that
		 * they're ready for xpc_allocate_msg().
		 */
		xpc_clear_local_msgqueue_flags_sn2(ch);

		ch_sn2->w_remote_GP.get = ch_sn2->remote_GP.get;

		dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
			"channel=%d\n", ch_sn2->w_remote_GP.get, ch->partid,
			ch->number);

		/*
		 * If anyone was waiting for message queue entries to become
		 * available, wake them up.
		 */
		if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
			wake_up(&ch->msg_allocate_wq);
	}

	/*
	 * Now check for newly sent messages by the other side.  (The remote
	 * PUT value will have changed since we last looked at it.)
	 */

	if (ch_sn2->w_remote_GP.put != ch_sn2->remote_GP.put) {
		/*
		 * Clear msg->flags in previously received messages, so that
		 * they're ready for xpc_get_deliverable_payload_sn2().
		 */
		xpc_clear_remote_msgqueue_flags_sn2(ch);

		smp_wmb(); /* ensure flags have been cleared before bte_copy */
		ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put;

		dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
			"channel=%d\n", ch_sn2->w_remote_GP.put, ch->partid,
			ch->number);

		npayloads_sent = xpc_n_of_deliverable_payloads_sn2(ch);
		if (npayloads_sent > 0) {
			dev_dbg(xpc_chan, "msgs waiting to be copied and "
				"delivered=%d, partid=%d, channel=%d\n",
				npayloads_sent, ch->partid, ch->number);

			if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
				xpc_activate_kthreads(ch, npayloads_sent);
		}
	}

	xpc_msgqueue_deref(ch);
}
static struct xpc_msg_sn2 *
xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	unsigned long remote_msg_pa;
	struct xpc_msg_sn2 *msg;
	u32 msg_index;
	u32 nmsgs;
	u64 msg_offset;
	enum xp_retval ret;

	if (mutex_lock_interruptible(&ch_sn2->msg_to_pull_mutex) != 0) {
		/* we were interrupted by a signal */
		return NULL;
	}

	while (get >= ch_sn2->next_msg_to_pull) {

		/* pull as many messages as are ready and able to be pulled */

		msg_index = ch_sn2->next_msg_to_pull % ch->remote_nentries;

		DBUG_ON(ch_sn2->next_msg_to_pull >= ch_sn2->w_remote_GP.put);
		nmsgs = ch_sn2->w_remote_GP.put - ch_sn2->next_msg_to_pull;
		if (msg_index + nmsgs > ch->remote_nentries) {
			/* ignore the ones that wrap the msg queue for now */
			nmsgs = ch->remote_nentries - msg_index;
		}

		msg_offset = msg_index * ch->entry_size;
		msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue +
		    msg_offset);
		remote_msg_pa = ch_sn2->remote_msgqueue_pa + msg_offset;

		ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg_pa,
						     nmsgs * ch->entry_size);
		if (ret != xpSuccess) {

			dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
				" msg %ld from partition %d, channel=%d, "
				"ret=%d\n", nmsgs, ch_sn2->next_msg_to_pull,
				ch->partid, ch->number, ret);

			XPC_DEACTIVATE_PARTITION(part, ret);

			mutex_unlock(&ch_sn2->msg_to_pull_mutex);
			return NULL;
		}

		ch_sn2->next_msg_to_pull += nmsgs;
	}

	mutex_unlock(&ch_sn2->msg_to_pull_mutex);

	/* return the message we were looking for */
	msg_offset = (get % ch->remote_nentries) * ch->entry_size;
	msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + msg_offset);

	return msg;
}
/*
 * Get the next deliverable message's payload.
 */
static void *
xpc_get_deliverable_payload_sn2(struct xpc_channel *ch)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg_sn2 *msg;
	void *payload = NULL;
	s64 get;

	do {
		if (ch->flags & XPC_C_DISCONNECTING)
			break;

		get = ch_sn2->w_local_GP.get;
		smp_rmb();	/* guarantee that .get loads before .put */
		if (get == ch_sn2->w_remote_GP.put)
			break;

		/* There are messages waiting to be pulled and delivered.
		 * We need to try to secure one for ourselves.  We'll do this
		 * by trying to increment w_local_GP.get and hope that no one
		 * else beats us to it.  If they do, we'll simply have to try
		 * again for the next one.
		 */

		if (cmpxchg(&ch_sn2->w_local_GP.get, get, get + 1) == get) {
			/* we got the entry referenced by get */

			dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, "
				"partid=%d, channel=%d\n", get + 1,
				ch->partid, ch->number);

			/* pull the message from the remote partition */

			msg = xpc_pull_remote_msg_sn2(ch, get);

			if (msg != NULL) {
				DBUG_ON(msg->number != get);
				DBUG_ON(msg->flags & XPC_M_SN2_DONE);
				DBUG_ON(!(msg->flags & XPC_M_SN2_READY));

				payload = &msg->payload;
			}
			break;
		}

	} while (1);

	return payload;
}
/*
 * Now we actually send the messages that are ready to be sent by advancing
 * the local message queue's Put value and then send a chctl msgrequest to the
 * recipient partition.
 */
static void
xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg_sn2 *msg;
	s64 put = initial_put + 1;
	int send_msgrequest = 0;

	while (1) {

		while (1) {
			if (put == ch_sn2->w_local_GP.put)
				break;

			msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->
						     local_msgqueue + (put %
						     ch->local_nentries) *
						     ch->entry_size);

			if (!(msg->flags & XPC_M_SN2_READY))
				break;

			put++;
		}

		if (put == initial_put) {
			/* nothing's changed */
			break;
		}

		if (cmpxchg_rel(&ch_sn2->local_GP->put, initial_put, put) !=
		    initial_put) {
			/* someone else beat us to it */
			DBUG_ON(ch_sn2->local_GP->put < initial_put);
			break;
		}

		/* we just set the new value of local_GP->put */

		dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, "
			"channel=%d\n", put, ch->partid, ch->number);

		send_msgrequest = 1;

		/*
		 * We need to ensure that the message referenced by
		 * local_GP->put is not XPC_M_SN2_READY or that local_GP->put
		 * equals w_local_GP.put, so we'll go have a look.
		 */
		initial_put = put;
	}

	if (send_msgrequest)
		xpc_send_chctl_msgrequest_sn2(ch);
}
/*
 * Allocate an entry for a message from the message queue associated with the
 * specified channel.
 */
static enum xp_retval
xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
		     struct xpc_msg_sn2 **address_of_msg)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg_sn2 *msg;
	enum xp_retval ret;
	s64 put;

	/*
	 * Get the next available message entry from the local message queue.
	 * If none are available, we'll make sure that we grab the latest
	 * GP values.
	 */
	ret = xpTimeout;

	while (1) {

		put = ch_sn2->w_local_GP.put;
		smp_rmb();	/* guarantee that .put loads before .get */
		if (put - ch_sn2->w_remote_GP.get < ch->local_nentries) {

			/* There are available message entries. We need to try
			 * to secure one for ourselves. We'll do this by trying
			 * to increment w_local_GP.put as long as someone else
			 * doesn't beat us to it. If they do, we'll have to
			 * try again.
			 */
			if (cmpxchg(&ch_sn2->w_local_GP.put, put, put + 1) ==
			    put) {
				/* we got the entry referenced by put */
				break;
			}
			continue;	/* try again */
		}

		/*
		 * There aren't any available msg entries at this time.
		 *
		 * In waiting for a message entry to become available,
		 * we set a timeout in case the other side is not sending
		 * completion interrupts. This lets us fake a notify IRQ
		 * that will cause the notify IRQ handler to fetch the latest
		 * GP values as if an interrupt was sent by the other side.
		 */
		if (ret == xpTimeout)
			xpc_send_chctl_local_msgrequest_sn2(ch);

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	/* get the message's address and initialize it */
	msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue +
				     (put % ch->local_nentries) *
				     ch->entry_size);

	DBUG_ON(msg->flags != 0);
	msg->number = put;

	dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
		"msg_number=%ld, partid=%d, channel=%d\n", put + 1,
		(void *)msg, msg->number, ch->partid, ch->number);

	*address_of_msg = msg;
	return xpSuccess;
}
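
/*
 * The fullness test above works because put and get are free-running
 * counters rather than wrapped indices: put - w_remote_GP.get is the number
 * of entries in flight. For example, with local_nentries == 64, put == 70
 * and get == 7, 63 entries are outstanding and the one free slot is
 * 70 % 64 == 6.
 */
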
/*
 * Common code that does the actual sending of the message by advancing the
 * local message queue's Put value and sends a chctl msgrequest to the
 * partition the message is being sent to.
 */
static enum xp_retval
xpc_send_payload_sn2(struct xpc_channel *ch, u32 flags, void *payload,
		     u16 payload_size, u8 notify_type, xpc_notify_func func,
		     void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg_sn2 *msg = msg;		/* self-init quiets a bogus */
	struct xpc_notify_sn2 *notify = notify;	/* "uninitialized" warning */
	s64 msg_number;
	s64 put;

	DBUG_ON(notify_type == XPC_N_CALL && func == NULL);

	if (XPC_MSG_SIZE(payload_size) > ch->entry_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_sn2(ch, flags, &msg);
	if (ret != xpSuccess)
		goto out_1;

	msg_number = msg->number;

	if (notify_type != 0) {
		/*
		 * Tell the remote side to send an ACK interrupt when the
		 * message has been delivered.
		 */
		msg->flags |= XPC_M_SN2_INTERRUPT;

		atomic_inc(&ch->n_to_notify);

		notify = &ch_sn2->notify_queue[msg_number % ch->local_nentries];
		notify->func = func;
		notify->key = key;
		notify->type = notify_type;

		/* ??? Is a mb() needed here? */

		if (ch->flags & XPC_C_DISCONNECTING) {
			/*
			 * An error occurred between our last error check and
			 * this one. We will try to clear the type field from
			 * the notify entry. If we succeed then
			 * xpc_disconnect_channel() didn't already process
			 * the notify entry.
			 */
			if (cmpxchg(&notify->type, notify_type, 0) ==
			    notify_type) {
				atomic_dec(&ch->n_to_notify);
				ret = ch->reason;
			}
			goto out_1;
		}
	}

	memcpy(&msg->payload, payload, payload_size);

	msg->flags |= XPC_M_SN2_READY;

	/*
	 * The preceding store of msg->flags must occur before the following
	 * load of local_GP->put.
	 */
	smp_mb();

	/* see if the message is next in line to be sent, if so send it */

	put = ch_sn2->local_GP->put;
	if (put == msg_number)
		xpc_send_msgs_sn2(ch, put);

out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}
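
/*
 * xpc_send_payload_sn2() is not called directly; it is installed as
 * xpc_send_payload in xpc_init_sn2() below and reached through the common
 * xp layer. A caller's path would look roughly like the following
 * (my_msg and its fields are hypothetical):
 *
 *	struct my_msg m = { .cmd = 1 };
 *	enum xp_retval ret;
 *
 *	ret = xpc_initiate_send(partid, ch_number, XPC_WAIT, &m, sizeof(m));
 *	if (ret != xpSuccess)
 *		...handle the error...
 */
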
/*
 * Now we actually acknowledge the messages that have been delivered and ack'd
 * by advancing the cached remote message queue's Get value and if requested
 * send a chctl msgrequest to the message sender's partition.
 *
 * If a message has XPC_M_SN2_INTERRUPT set, send an interrupt to the partition
 * that sent the message.
 */
static void
xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg_sn2 *msg;
	s64 get = initial_get + 1;
	int send_msgrequest = 0;

	while (1) {

		while (1) {
			if (get == ch_sn2->w_local_GP.get)
				break;

			msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->
						     remote_msgqueue + (get %
						     ch->remote_nentries) *
						     ch->entry_size);

			if (!(msg->flags & XPC_M_SN2_DONE))
				break;

			msg_flags |= msg->flags;
			get++;
		}

		if (get == initial_get) {
			/* nothing's changed */
			break;
		}

		if (cmpxchg_rel(&ch_sn2->local_GP->get, initial_get, get) !=
		    initial_get) {
			/* someone else beat us to it */
			DBUG_ON(ch_sn2->local_GP->get <= initial_get);
			break;
		}

		/* we just set the new value of local_GP->get */

		dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, "
			"channel=%d\n", get, ch->partid, ch->number);

		send_msgrequest = (msg_flags & XPC_M_SN2_INTERRUPT);

		/*
		 * We need to ensure that the message referenced by
		 * local_GP->get is not XPC_M_SN2_DONE or that local_GP->get
		 * equals w_local_GP.get, so we'll go have a look.
		 */
		initial_get = get;
	}

	if (send_msgrequest)
		xpc_send_chctl_msgrequest_sn2(ch);
}

static void
xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
{
	struct xpc_msg_sn2 *msg;
	s64 msg_number;
	s64 get;

	msg = container_of(payload, struct xpc_msg_sn2, payload);
	msg_number = msg->number;

	dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
		(void *)msg, msg_number, ch->partid, ch->number);

	DBUG_ON((((u64)msg - (u64)ch->sn.sn2.remote_msgqueue) /
		 ch->entry_size) != msg_number % ch->remote_nentries);
	DBUG_ON(!(msg->flags & XPC_M_SN2_READY));
	DBUG_ON(msg->flags & XPC_M_SN2_DONE);

	msg->flags |= XPC_M_SN2_DONE;

	/*
	 * The preceding store of msg->flags must occur before the following
	 * load of local_GP->get.
	 */
	smp_mb();

	/*
	 * See if this message is next in line to be acknowledged as having
	 * been delivered.
	 */
	get = ch->sn.sn2.local_GP->get;
	if (get == msg_number)
		xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
}
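
/*
 * Taken together, the routines above give each message slot a simple
 * lifecycle: xpc_allocate_msg_sn2() hands out a slot with flags == 0,
 * xpc_send_payload_sn2() fills it and sets XPC_M_SN2_READY (plus
 * XPC_M_SN2_INTERRUPT when a delivery notification was requested), the
 * receiver pulls it and sets XPC_M_SN2_DONE in xpc_received_payload_sn2(),
 * and xpc_acknowledge_msgs_sn2() advances local_GP->get past every
 * consecutive DONE slot so the sender may reuse it.
 */
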
int
xpc_init_sn2(void)
{
	int ret;
	size_t buf_size;

	xpc_setup_partitions_sn = xpc_setup_partitions_sn_sn2;
	xpc_teardown_partitions_sn = xpc_teardown_partitions_sn_sn2;
	xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2;
	xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_sn2;
	xpc_increment_heartbeat = xpc_increment_heartbeat_sn2;
	xpc_offline_heartbeat = xpc_offline_heartbeat_sn2;
	xpc_online_heartbeat = xpc_online_heartbeat_sn2;
	xpc_heartbeat_init = xpc_heartbeat_init_sn2;
	xpc_heartbeat_exit = xpc_heartbeat_exit_sn2;
	xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_sn2;

	xpc_request_partition_activation = xpc_request_partition_activation_sn2;
	xpc_request_partition_reactivation =
	    xpc_request_partition_reactivation_sn2;
	xpc_request_partition_deactivation =
	    xpc_request_partition_deactivation_sn2;
	xpc_cancel_partition_deactivation_request =
	    xpc_cancel_partition_deactivation_request_sn2;

	xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2;
	xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_sn2;
	xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_sn2;
	xpc_make_first_contact = xpc_make_first_contact_sn2;

	xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_sn2;
	xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_sn2;
	xpc_send_chctl_closereply = xpc_send_chctl_closereply_sn2;
	xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_sn2;
	xpc_send_chctl_openreply = xpc_send_chctl_openreply_sn2;

	xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_sn2;

	xpc_setup_msg_structures = xpc_setup_msg_structures_sn2;
	xpc_teardown_msg_structures = xpc_teardown_msg_structures_sn2;

	xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2;
	xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2;
	xpc_n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_sn2;
	xpc_get_deliverable_payload = xpc_get_deliverable_payload_sn2;

	xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_sn2;
	xpc_indicate_partition_disengaged =
	    xpc_indicate_partition_disengaged_sn2;
	xpc_partition_engaged = xpc_partition_engaged_sn2;
	xpc_any_partition_engaged = xpc_any_partition_engaged_sn2;
	xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_sn2;

	xpc_send_payload = xpc_send_payload_sn2;
	xpc_received_payload = xpc_received_payload_sn2;
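
	/*
	 * From here on the arch-independent XPC code reaches this file only
	 * through the function pointers assigned above; a call such as
	 * xpc_send_payload(ch, ...) now lands in xpc_send_payload_sn2().
	 * The uv flavor (xpc_uv.c) installs its own set the same way.
	 */
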
	if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
		dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
			"larger than %d\n", XPC_MSG_HDR_MAX_SIZE);
		return -E2BIG;
	}

	buf_size = max(XPC_RP_VARS_SIZE,
		       XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES_SN2);
	xpc_remote_copy_buffer_sn2 = xpc_kmalloc_cacheline_aligned(buf_size,
								   GFP_KERNEL,
					      &xpc_remote_copy_buffer_base_sn2);
	if (xpc_remote_copy_buffer_sn2 == NULL) {
		dev_err(xpc_part, "can't get memory for remote copy buffer\n");
		return -ENOMEM;
	}

	/* open up protections for IPI and [potentially] amo operations */
	xpc_allow_IPI_ops_sn2();
	xpc_allow_amo_ops_shub_wars_1_1_sn2();

	/*
	 * This is safe to do before the xpc_hb_checker thread has started
	 * because the handler releases a wait queue. If an interrupt is
	 * received before the thread is waiting, it will not go to sleep,
	 * but rather immediately process the interrupt.
	 */
	ret = request_irq(SGI_XPC_ACTIVATE, xpc_handle_activate_IRQ_sn2, 0,
			  "xpc hb", NULL);
	if (ret != 0) {
		dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
			"errno=%d\n", -ret);
		xpc_disallow_IPI_ops_sn2();
		kfree(xpc_remote_copy_buffer_base_sn2);
	}
	return ret;
}

void
xpc_exit_sn2(void)
{
	free_irq(SGI_XPC_ACTIVATE, NULL);
	xpc_disallow_IPI_ops_sn2();
	kfree(xpc_remote_copy_buffer_base_sn2);
}