/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) sn2-based functions.
 *
 * Architecture specific implementation of common functions.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <asm/uncached.h>
#include <asm/sn/sn_sal.h>
#include "xpc.h"
static struct xpc_vars_sn2 *xpc_vars;		/* >>> Add _sn2 suffix? */
static struct xpc_vars_part_sn2 *xpc_vars_part;	/* >>> Add _sn2 suffix? */

/* SH_IPI_ACCESS shub register value on startup */
static u64 xpc_sh1_IPI_access;
static u64 xpc_sh2_IPI_access0;
static u64 xpc_sh2_IPI_access1;
static u64 xpc_sh2_IPI_access2;
static u64 xpc_sh2_IPI_access3;
/*
 * Change protections to allow IPI operations.
 */
static void
xpc_allow_IPI_ops_sn2(void)
{
	int node;
	int nasid;

	/* >>> The following should get moved into SAL. */
	if (is_shub2()) {
		xpc_sh2_IPI_access0 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
		xpc_sh2_IPI_access1 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
		xpc_sh2_IPI_access2 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
		xpc_sh2_IPI_access3 =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
			      -1UL);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
			      -1UL);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
			      -1UL);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
			      -1UL);
		}
	} else {
		xpc_sh1_IPI_access =
		    (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));

		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
			      -1UL);
		}
	}
}
/*
 * Restrict protections to disallow IPI operations.
 */
static void
xpc_disallow_IPI_ops_sn2(void)
{
	int node;
	int nasid;

	/* >>> The following should get moved into SAL. */
	if (is_shub2()) {
		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
			      xpc_sh2_IPI_access0);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
			      xpc_sh2_IPI_access1);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
			      xpc_sh2_IPI_access2);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
			      xpc_sh2_IPI_access3);
		}
	} else {
		for_each_online_node(node) {
			nasid = cnodeid_to_nasid(node);
			HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
			      xpc_sh1_IPI_access);
		}
	}
}
/*
 * The following set of functions are used for the sending and receiving of
 * IRQs (also known as IPIs). There are two flavors of IRQs, one that is
 * associated with partition activity (SGI_XPC_ACTIVATE) and the other that
 * is associated with channel activity (SGI_XPC_NOTIFY).
 */

static u64
xpc_receive_IRQ_amo_sn2(struct amo *amo)
{
	return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
}
static enum xp_retval
xpc_send_IRQ_sn2(struct amo *amo, u64 flag, int nasid, int phys_cpuid,
		 int vector)
{
	int ret = 0;
	unsigned long irq_flags;

	local_irq_save(irq_flags);

	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag);
	sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);

	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);

	return ((ret == 0) ? xpSuccess : xpPioReadError);
}
static struct amo *
xpc_init_IRQ_amo_sn2(int index)
{
	struct amo *amo = xpc_vars->amos_page + index;

	(void)xpc_receive_IRQ_amo_sn2(amo);	/* clear amo variable */
	return amo;
}
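/*
 * Illustrative sketch (not from the original driver): an amo word behaves as
 * a one-word mailbox.  A sender ORs its bit into the remote word and fires an
 * IPI; the receiver's fetchop-clear read returns every bit accumulated since
 * the last read, so several senders coalesce into one interrupt.  Assuming
 * hypothetical bit positions 3 and 7:
 *
 *	sender A:  FETCHOP_STORE_OP(var, FETCHOP_OR, 1UL << 3);
 *	sender B:  FETCHOP_STORE_OP(var, FETCHOP_OR, 1UL << 7);
 *	receiver:  mask = FETCHOP_LOAD_OP(var, FETCHOP_CLEAR);
 *	           mask is 0x88 and the amo word is atomically reset to 0.
 */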
/*
 * Functions associated with SGI_XPC_ACTIVATE IRQ.
 */

/*
 * Notify the heartbeat check thread that an activate IRQ has been received.
 */
static irqreturn_t
xpc_handle_activate_IRQ_sn2(int irq, void *dev_id)
{
	atomic_inc(&xpc_activate_IRQ_rcvd);
	wake_up_interruptible(&xpc_activate_IRQ_wq);
	return IRQ_HANDLED;
}
/*
 * Flag the appropriate amo variable and send an IRQ to the specified node.
 */
static void
xpc_send_activate_IRQ_sn2(u64 amos_page_pa, int from_nasid, int to_nasid,
			  int to_phys_cpuid)
{
	int w_index = XPC_NASID_W_INDEX(from_nasid);
	int b_index = XPC_NASID_B_INDEX(from_nasid);
	struct amo *amos = (struct amo *)__va(amos_page_pa +
					      (XPC_ACTIVATE_IRQ_AMOS *
					       sizeof(struct amo)));

	(void)xpc_send_IRQ_sn2(&amos[w_index], (1UL << b_index), to_nasid,
			       to_phys_cpuid, SGI_XPC_ACTIVATE);
}
static void
xpc_send_local_activate_IRQ_sn2(int from_nasid)
{
	int w_index = XPC_NASID_W_INDEX(from_nasid);
	int b_index = XPC_NASID_B_INDEX(from_nasid);
	struct amo *amos = (struct amo *)__va(xpc_vars->amos_page_pa +
					      (XPC_ACTIVATE_IRQ_AMOS *
					       sizeof(struct amo)));

	/* fake the sending and receipt of an activate IRQ from remote nasid */
	FETCHOP_STORE_OP(TO_AMO((u64)&amos[w_index].variable), FETCHOP_OR,
			 (1UL << b_index));
	atomic_inc(&xpc_activate_IRQ_rcvd);
	wake_up_interruptible(&xpc_activate_IRQ_wq);
}
/*
 * Functions associated with SGI_XPC_NOTIFY IRQ.
 */

/*
 * Check to see if any chctl flags were sent from the specified partition.
 */
static void
xpc_check_for_sent_chctl_flags_sn2(struct xpc_partition *part)
{
	union xpc_channel_ctl_flags chctl;
	unsigned long irq_flags;

	chctl.all_flags = xpc_receive_IRQ_amo_sn2(part->sn.sn2.
						  local_chctl_amo_va);
	if (chctl.all_flags == 0)
		return;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.all_flags |= chctl.all_flags;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	dev_dbg(xpc_chan, "received notify IRQ from partid=%d, chctl.all_flags="
		"0x%lx\n", XPC_PARTID(part), chctl.all_flags);

	xpc_wakeup_channel_mgr(part);
}
/*
 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
 * than one partition, we use an amo structure per partition to indicate
 * whether a partition has sent an IRQ or not. If it has, then wake up the
 * associated kthread to handle it.
 *
 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IRQs sent by XPC
 * running on other partitions.
 *
 * Noteworthy Arguments:
 *
 *	irq - Interrupt ReQuest number. NOT USED.
 *
 *	dev_id - partid of IRQ's potential sender.
 */
static irqreturn_t
xpc_handle_notify_IRQ_sn2(int irq, void *dev_id)
{
	short partid = (short)(u64)dev_id;
	struct xpc_partition *part = &xpc_partitions[partid];

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

	if (xpc_part_ref(part)) {
		xpc_check_for_sent_chctl_flags_sn2(part);

		xpc_part_deref(part);
	}
	return IRQ_HANDLED;
}
/*
 * Check to see if xpc_handle_notify_IRQ_sn2() dropped any IRQs on the floor
 * because the write to their associated amo variable completed after the IRQ
 * was received.
 */
static void
xpc_check_for_dropped_notify_IRQ_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;

	if (xpc_part_ref(part)) {
		xpc_check_for_sent_chctl_flags_sn2(part);

		part_sn2->dropped_notify_IRQ_timer.expires = jiffies +
		    XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
		add_timer(&part_sn2->dropped_notify_IRQ_timer);
		xpc_part_deref(part);
	}
}
/*
 * Send a notify IRQ to the remote partition that is associated with the
 * specified channel.
 */
static void
xpc_send_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
			char *chctl_flag_string, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	union xpc_channel_ctl_flags chctl = { 0 };
	enum xp_retval ret;

	if (likely(part->act_state != XPC_P_DEACTIVATING)) {
		chctl.flags[ch->number] = chctl_flag;
		ret = xpc_send_IRQ_sn2(part_sn2->remote_chctl_amo_va,
				       chctl.all_flags,
				       part_sn2->notify_IRQ_nasid,
				       part_sn2->notify_IRQ_phys_cpuid,
				       SGI_XPC_NOTIFY);
		dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
			chctl_flag_string, ch->partid, ch->number, ret);
		if (unlikely(ret != xpSuccess)) {
			if (irq_flags != NULL)
				spin_unlock_irqrestore(&ch->lock, *irq_flags);
			XPC_DEACTIVATE_PARTITION(part, ret);
			if (irq_flags != NULL)
				spin_lock_irqsave(&ch->lock, *irq_flags);
		}
	}
}
#define XPC_SEND_NOTIFY_IRQ_SN2(_ch, _ipi_f, _irq_f) \
		xpc_send_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f, _irq_f)
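/*
 * Usage sketch (illustrative, not part of the original file): because the
 * macro stringifies its flag argument, a call such as
 *
 *	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREQUEST, irq_flags);
 *
 * expands to xpc_send_notify_IRQ_sn2(ch, XPC_CHCTL_OPENREQUEST,
 * "XPC_CHCTL_OPENREQUEST", irq_flags), so the dev_dbg() output names the
 * flag that was sent without each caller having to pass a string by hand.
 */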
/*
 * Make it look like the remote partition, which is associated with the
 * specified channel, sent us a notify IRQ. This faked IRQ will be handled
 * by xpc_check_for_dropped_notify_IRQ_sn2().
 */
static void
xpc_send_local_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
			      char *chctl_flag_string)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	union xpc_channel_ctl_flags chctl = { 0 };

	chctl.flags[ch->number] = chctl_flag;
	FETCHOP_STORE_OP(TO_AMO((u64)&part->sn.sn2.local_chctl_amo_va->
				variable), FETCHOP_OR, chctl.all_flags);
	dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
		chctl_flag_string, ch->partid, ch->number);
}
#define XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(_ch, _ipi_f) \
		xpc_send_local_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f)
static void
xpc_send_chctl_closerequest_sn2(struct xpc_channel *ch,
				unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->local_openclose_args;

	args->reason = ch->reason;
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREQUEST, irq_flags);
}

static void
xpc_send_chctl_closereply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
{
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREPLY, irq_flags);
}

static void
xpc_send_chctl_openrequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->local_openclose_args;

	args->msg_size = ch->msg_size;
	args->local_nentries = ch->local_nentries;
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREQUEST, irq_flags);
}

static void
xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_openclose_args *args = ch->local_openclose_args;

	args->remote_nentries = ch->remote_nentries;
	args->local_nentries = ch->local_nentries;
	args->local_msgqueue_pa = __pa(ch->local_msgqueue);
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREPLY, irq_flags);
}

static void
xpc_send_chctl_msgrequest_sn2(struct xpc_channel *ch)
{
	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST, NULL);
}

static void
xpc_send_chctl_local_msgrequest_sn2(struct xpc_channel *ch)
{
	XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST);
}
/*
 * This next set of functions are used to keep track of when a partition is
 * potentially engaged in accessing memory belonging to another partition.
 */

static void
xpc_indicate_partition_engaged_sn2(struct xpc_partition *part)
{
	unsigned long irq_flags;
	struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
					     (XPC_ENGAGED_PARTITIONS_AMO *
					      sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* set bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
			 (1UL << sn_partition_id));
	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);
}

static void
xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	unsigned long irq_flags;
	struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
					     (XPC_ENGAGED_PARTITIONS_AMO *
					      sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* clear bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
			 ~(1UL << sn_partition_id));
	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);

	/*
	 * Send activate IRQ to get other side to see that we've cleared our
	 * bit in their engaged partitions amo.
	 */
	xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
				  cnodeid_to_nasid(0),
				  part_sn2->activate_IRQ_nasid,
				  part_sn2->activate_IRQ_phys_cpuid);
}
static int
xpc_partition_engaged_sn2(short partid)
{
	struct amo *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;

	/* our partition's amo variable ANDed with partid mask */
	return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
		(1UL << partid)) != 0;
}

static int
xpc_any_partition_engaged_sn2(void)
{
	struct amo *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;

	/* our partition's amo variable */
	return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) != 0;
}

static void
xpc_assume_partition_disengaged_sn2(short partid)
{
	struct amo *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;

	/* clear bit(s) based on partid mask in our partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
			 ~(1UL << partid));
}
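/*
 * Worked example (illustrative only): the engaged-partitions amo is a plain
 * 64-bit mask indexed by partid.  If partitions 1 and 5 have set their bits
 * in our amo, the variable holds 0x22 and:
 *
 *	xpc_partition_engaged_sn2(5)            -> (0x22 & (1UL << 5)) != 0 -> 1
 *	xpc_partition_engaged_sn2(2)            -> (0x22 & (1UL << 2)) != 0 -> 0
 *	xpc_any_partition_engaged_sn2()         -> 0x22 != 0            -> 1
 *	xpc_assume_partition_disengaged_sn2(5)  ANDs with ~(1UL << 5), leaving 0x02
 */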
/* original protection values for each node */
static u64 xpc_prot_vec_sn2[MAX_NUMNODES];
/*
 * Change protections to allow amo operations on non-Shub 1.1 systems.
 */
static enum xp_retval
xpc_allow_amo_ops_sn2(struct amo *amos_page)
{
	u64 nasid_array = 0;
	int ret;

	/*
	 * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST
	 * collides with memory operations. On those systems we call
	 * xpc_allow_amo_ops_shub_wars_1_1_sn2() instead.
	 */
	if (!enable_shub_wars_1_1()) {
		ret = sn_change_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE,
					   SN_MEMPROT_ACCESS_CLASS_1,
					   &nasid_array);
		if (ret != 0)
			return xpSalError;
	}
	return xpSuccess;
}
/*
 * Change protections to allow amo operations on Shub 1.1 systems.
 */
static void
xpc_allow_amo_ops_shub_wars_1_1_sn2(void)
{
	int node;
	int nasid;

	if (!enable_shub_wars_1_1())
		return;

	for_each_online_node(node) {
		nasid = cnodeid_to_nasid(node);
		/* save current protection values */
		xpc_prot_vec_sn2[node] =
		    (u64)HUB_L((u64 *)GLOBAL_MMR_ADDR(nasid,
						  SH1_MD_DQLP_MMR_DIR_PRIVEC0));
		/* open up everything */
		HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
					     SH1_MD_DQLP_MMR_DIR_PRIVEC0),
		      -1UL);
		HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
					     SH1_MD_DQRP_MMR_DIR_PRIVEC0),
		      -1UL);
	}
}
static enum xp_retval
xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
{
	struct amo *amos_page;
	int i;
	enum xp_retval ret;

	xpc_vars = XPC_RP_VARS(rp);

	rp->sn.vars_pa = __pa(xpc_vars);

	/* vars_part array follows immediately after vars */
	xpc_vars_part = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) +
						     XPC_RP_VARS_SIZE);

	/*
	 * Before clearing xpc_vars, see if a page of amos had been previously
	 * allocated. If not we'll need to allocate one and set permissions
	 * so that cross-partition amos are allowed.
	 *
	 * The allocated amo page needs MCA reporting to remain disabled after
	 * XPC has unloaded. To make this work, we keep a copy of the pointer
	 * to this page (i.e., amos_page) in the struct xpc_vars structure,
	 * which is pointed to by the reserved page, and re-use that saved copy
	 * on subsequent loads of XPC. This amo page is never freed, and its
	 * memory protections are never restricted.
	 */
	amos_page = xpc_vars->amos_page;
	if (amos_page == NULL) {
		amos_page = (struct amo *)TO_AMO(uncached_alloc_page(0, 1));
		if (amos_page == NULL) {
			dev_err(xpc_part, "can't allocate page of amos\n");
			return xpNoMemory;
		}

		/*
		 * Open up amo-R/W to cpu. This is done on Shub 1.1 systems
		 * when xpc_allow_amo_ops_shub_wars_1_1_sn2() is called.
		 */
		ret = xpc_allow_amo_ops_sn2(amos_page);
		if (ret != xpSuccess) {
			dev_err(xpc_part, "can't allow amo operations\n");
			uncached_free_page(__IA64_UNCACHED_OFFSET |
					   TO_PHYS((u64)amos_page), 1);
			return ret;
		}
	}

	memset(xpc_vars, 0, sizeof(struct xpc_vars_sn2));

	xpc_vars->version = XPC_V_VERSION;
	xpc_vars->activate_IRQ_nasid = cpuid_to_nasid(0);
	xpc_vars->activate_IRQ_phys_cpuid = cpu_physical_id(0);
	xpc_vars->vars_part_pa = __pa(xpc_vars_part);
	xpc_vars->amos_page_pa = ia64_tpa((u64)amos_page);
	xpc_vars->amos_page = amos_page;	/* save for next load of XPC */

	/* clear xpc_vars_part */
	memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part_sn2) *
	       xp_max_npartitions);

	/* initialize the activate IRQ related amo variables */
	for (i = 0; i < xp_nasid_mask_words; i++)
		(void)xpc_init_IRQ_amo_sn2(XPC_ACTIVATE_IRQ_AMOS + i);

	/* initialize the engaged remote partitions related amo variables */
	(void)xpc_init_IRQ_amo_sn2(XPC_ENGAGED_PARTITIONS_AMO);
	(void)xpc_init_IRQ_amo_sn2(XPC_DEACTIVATE_REQUEST_AMO);

	return xpSuccess;
}
static void
xpc_increment_heartbeat_sn2(void)
{
	xpc_vars->heartbeat++;
}

static void
xpc_offline_heartbeat_sn2(void)
{
	xpc_increment_heartbeat_sn2();
	xpc_vars->heartbeat_offline = 1;
}

static void
xpc_online_heartbeat_sn2(void)
{
	xpc_increment_heartbeat_sn2();
	xpc_vars->heartbeat_offline = 0;
}

static void
xpc_heartbeat_init_sn2(void)
{
	DBUG_ON(xpc_vars == NULL);

	bitmap_zero(xpc_vars->heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2);
	xpc_heartbeating_to_mask = &xpc_vars->heartbeating_to_mask[0];
	xpc_online_heartbeat_sn2();
}

static void
xpc_heartbeat_exit_sn2(void)
{
	xpc_offline_heartbeat_sn2();
}
/*
 * At periodic intervals, scan through all active partitions and ensure
 * their heartbeat is still active. If not, the partition is deactivated.
 */
static void
xpc_check_remote_hb_sn2(void)
{
	struct xpc_vars_sn2 *remote_vars;
	struct xpc_partition *part;
	short partid;
	enum xp_retval ret;

	remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer;

	for (partid = 0; partid < xp_max_npartitions; partid++) {

		if (xpc_exiting)
			break;

		if (partid == sn_partition_id)
			continue;

		part = &xpc_partitions[partid];

		if (part->act_state == XPC_P_INACTIVE ||
		    part->act_state == XPC_P_DEACTIVATING) {
			continue;
		}

		/* pull the remote_hb cache line */
		ret = xp_remote_memcpy(remote_vars,
				       (void *)part->sn.sn2.remote_vars_pa,
				       XPC_RP_VARS_SIZE);
		if (ret != xpSuccess) {
			XPC_DEACTIVATE_PARTITION(part, ret);
			continue;
		}

		dev_dbg(xpc_part, "partid = %d, heartbeat = %ld, last_heartbeat"
			" = %ld, heartbeat_offline = %ld, HB_mask[0] = 0x%lx\n",
			partid, remote_vars->heartbeat, part->last_heartbeat,
			remote_vars->heartbeat_offline,
			remote_vars->heartbeating_to_mask[0]);

		if (((remote_vars->heartbeat == part->last_heartbeat) &&
		     (remote_vars->heartbeat_offline == 0)) ||
		    !xpc_hb_allowed(sn_partition_id,
				    &remote_vars->heartbeating_to_mask)) {

			XPC_DEACTIVATE_PARTITION(part, xpNoHeartbeat);
			continue;
		}

		part->last_heartbeat = remote_vars->heartbeat;
	}
}
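/*
 * Illustrative sketch of the deactivation rule above (assumed values, not
 * driver output): with last_heartbeat = 41, a remote partition is considered
 * alive if its counter advanced (heartbeat = 42) or if it declared itself
 * offline (heartbeat_offline != 0).  It is deactivated with xpNoHeartbeat if
 * the counter is still 41 while heartbeat_offline == 0, or if our partition
 * is missing from its heartbeating_to_mask.
 */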
/*
 * Get a copy of the remote partition's XPC variables from the reserved page.
 *
 * remote_vars points to a buffer that is cacheline aligned for BTE copies and
 * assumed to be of size XPC_RP_VARS_SIZE.
 */
static enum xp_retval
xpc_get_remote_vars_sn2(u64 remote_vars_pa, struct xpc_vars_sn2 *remote_vars)
{
	enum xp_retval ret;

	if (remote_vars_pa == 0)
		return xpVarsNotSet;

	/* pull over the cross partition variables */
	ret = xp_remote_memcpy(remote_vars, (void *)remote_vars_pa,
			       XPC_RP_VARS_SIZE);
	if (ret != xpSuccess)
		return ret;

	if (XPC_VERSION_MAJOR(remote_vars->version) !=
	    XPC_VERSION_MAJOR(XPC_V_VERSION)) {
		return xpBadVersion;
	}

	return xpSuccess;
}
static void
xpc_request_partition_activation_sn2(struct xpc_rsvd_page *remote_rp,
				     u64 remote_rp_pa, int nasid)
{
	xpc_send_local_activate_IRQ_sn2(nasid);
}

static void
xpc_request_partition_reactivation_sn2(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_sn2(part->sn.sn2.activate_IRQ_nasid);
}
static void
xpc_request_partition_deactivation_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	unsigned long irq_flags;
	struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
					     (XPC_DEACTIVATE_REQUEST_AMO *
					      sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* set bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
			 (1UL << sn_partition_id));
	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);

	/*
	 * Send activate IRQ to get other side to see that we've set our
	 * bit in their deactivate request amo.
	 */
	xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
				  cnodeid_to_nasid(0),
				  part_sn2->activate_IRQ_nasid,
				  part_sn2->activate_IRQ_phys_cpuid);
}
static void
xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part)
{
	unsigned long irq_flags;
	struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
					     (XPC_DEACTIVATE_REQUEST_AMO *
					      sizeof(struct amo)));

	local_irq_save(irq_flags);

	/* clear bit corresponding to our partid in remote partition's amo */
	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
			 ~(1UL << sn_partition_id));
	/*
	 * We must always use the nofault function regardless of whether we
	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
	 * didn't, we'd never know that the other partition is down and would
	 * keep sending IRQs and amos to it until the heartbeat times out.
	 */
	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
							       variable),
						     xp_nofault_PIOR_target));

	local_irq_restore(irq_flags);
}
static int
xpc_partition_deactivation_requested_sn2(short partid)
{
	struct amo *amo = xpc_vars->amos_page + XPC_DEACTIVATE_REQUEST_AMO;

	/* our partition's amo variable ANDed with partid mask */
	return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
		(1UL << partid)) != 0;
}
/*
 * Update the remote partition's info.
 */
static void
xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
			      unsigned long *remote_rp_stamp, u64 remote_rp_pa,
			      u64 remote_vars_pa,
			      struct xpc_vars_sn2 *remote_vars)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;

	part->remote_rp_version = remote_rp_version;
	dev_dbg(xpc_part, "  remote_rp_version = 0x%016x\n",
		part->remote_rp_version);

	part->remote_rp_stamp = *remote_rp_stamp;
	dev_dbg(xpc_part, "  remote_rp_stamp = 0x%016lx\n",
		part->remote_rp_stamp);

	part->remote_rp_pa = remote_rp_pa;
	dev_dbg(xpc_part, "  remote_rp_pa = 0x%016lx\n", part->remote_rp_pa);

	part_sn2->remote_vars_pa = remote_vars_pa;
	dev_dbg(xpc_part, "  remote_vars_pa = 0x%016lx\n",
		part_sn2->remote_vars_pa);

	part->last_heartbeat = remote_vars->heartbeat;
	dev_dbg(xpc_part, "  last_heartbeat = 0x%016lx\n",
		part->last_heartbeat);

	part_sn2->remote_vars_part_pa = remote_vars->vars_part_pa;
	dev_dbg(xpc_part, "  remote_vars_part_pa = 0x%016lx\n",
		part_sn2->remote_vars_part_pa);

	part_sn2->activate_IRQ_nasid = remote_vars->activate_IRQ_nasid;
	dev_dbg(xpc_part, "  activate_IRQ_nasid = 0x%x\n",
		part_sn2->activate_IRQ_nasid);

	part_sn2->activate_IRQ_phys_cpuid =
	    remote_vars->activate_IRQ_phys_cpuid;
	dev_dbg(xpc_part, "  activate_IRQ_phys_cpuid = 0x%x\n",
		part_sn2->activate_IRQ_phys_cpuid);

	part_sn2->remote_amos_page_pa = remote_vars->amos_page_pa;
	dev_dbg(xpc_part, "  remote_amos_page_pa = 0x%lx\n",
		part_sn2->remote_amos_page_pa);

	part_sn2->remote_vars_version = remote_vars->version;
	dev_dbg(xpc_part, "  remote_vars_version = 0x%x\n",
		part_sn2->remote_vars_version);
}
/*
 * Prior code has determined the nasid which generated an activate IRQ.
 * Inspect that nasid to determine if its partition needs to be activated
 * or deactivated.
 *
 * A partition is considered "awaiting activation" if our partition
 * flags indicate it is not active and it has a heartbeat. A
 * partition is considered "awaiting deactivation" if our partition
 * flags indicate it is active but it has no heartbeat or it is not
 * sending its heartbeat to us.
 *
 * To determine the heartbeat, the remote nasid must have a properly
 * initialized reserved page.
 */
static void
xpc_identify_activate_IRQ_req_sn2(int nasid)
{
	struct xpc_rsvd_page *remote_rp;
	struct xpc_vars_sn2 *remote_vars;
	u64 remote_rp_pa;
	u64 remote_vars_pa;
	int remote_rp_version;
	int reactivate = 0;
	unsigned long remote_rp_stamp = 0;
	short partid;
	struct xpc_partition *part;
	struct xpc_partition_sn2 *part_sn2;
	enum xp_retval ret;

	/* pull over the reserved page structure */

	remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer;

	ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa);
	if (ret != xpSuccess) {
		dev_warn(xpc_part, "unable to get reserved page from nasid %d, "
			 "which sent interrupt, reason=%d\n", nasid, ret);
		return;
	}

	remote_vars_pa = remote_rp->sn.vars_pa;
	remote_rp_version = remote_rp->version;
	remote_rp_stamp = remote_rp->stamp;

	partid = remote_rp->SAL_partid;
	part = &xpc_partitions[partid];
	part_sn2 = &part->sn.sn2;

	/* pull over the cross partition variables */

	remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer;

	ret = xpc_get_remote_vars_sn2(remote_vars_pa, remote_vars);
	if (ret != xpSuccess) {
		dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
			 "which sent interrupt, reason=%d\n", nasid, ret);

		XPC_DEACTIVATE_PARTITION(part, ret);
		return;
	}

	part->activate_IRQ_rcvd++;

	dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
		"%ld:0x%lx\n", (int)nasid, (int)partid, part->activate_IRQ_rcvd,
		remote_vars->heartbeat, remote_vars->heartbeating_to_mask[0]);

	if (xpc_partition_disengaged(part) &&
	    part->act_state == XPC_P_INACTIVE) {

		xpc_update_partition_info_sn2(part, remote_rp_version,
					      &remote_rp_stamp, remote_rp_pa,
					      remote_vars_pa, remote_vars);

		if (xpc_partition_deactivation_requested_sn2(partid)) {
			/*
			 * Other side is waiting on us to deactivate even
			 * though we already have.
			 */
			return;
		}

		xpc_activate_partition(part);
		return;
	}

	DBUG_ON(part->remote_rp_version == 0);
	DBUG_ON(part_sn2->remote_vars_version == 0);

	if (remote_rp_stamp != part->remote_rp_stamp) {

		/* the other side rebooted */

		DBUG_ON(xpc_partition_engaged_sn2(partid));
		DBUG_ON(xpc_partition_deactivation_requested_sn2(partid));

		xpc_update_partition_info_sn2(part, remote_rp_version,
					      &remote_rp_stamp, remote_rp_pa,
					      remote_vars_pa, remote_vars);
		reactivate = 1;
	}

	if (part->disengage_timeout > 0 && !xpc_partition_disengaged(part)) {
		/* still waiting on other side to disengage from us */
		return;
	}

	if (reactivate)
		XPC_DEACTIVATE_PARTITION(part, xpReactivating);
	else if (xpc_partition_deactivation_requested_sn2(partid))
		XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown);
}
/*
 * Loop through the activation amo variables and process any bits
 * which are set. Each bit indicates a nasid sending a partition
 * activation or deactivation request.
 *
 * Return #of IRQs detected.
 */
static int
xpc_identify_activate_IRQ_sender_sn2(void)
{
	int word, bit;
	u64 nasid_mask;
	u64 nasid;		/* remote nasid */
	int n_IRQs_detected = 0;
	struct amo *act_amos;

	act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS;

	/* scan through act amo variable looking for non-zero entries */
	for (word = 0; word < xp_nasid_mask_words; word++) {

		if (xpc_exiting)
			break;

		nasid_mask = xpc_receive_IRQ_amo_sn2(&act_amos[word]);
		if (nasid_mask == 0) {
			/* no IRQs from nasids in this variable */
			continue;
		}

		dev_dbg(xpc_part, "amo[%d] gave back 0x%lx\n", word,
			nasid_mask);

		/*
		 * If this nasid has been added to the machine since
		 * our partition was reset, this will retain the
		 * remote nasid in our reserved pages machine mask.
		 * This is used in the event of module reload.
		 */
		xpc_mach_nasids[word] |= nasid_mask;

		/* locate the nasid(s) which sent interrupts */

		for (bit = 0; bit < (8 * sizeof(u64)); bit++) {
			if (nasid_mask & (1UL << bit)) {
				n_IRQs_detected++;
				nasid = XPC_NASID_FROM_W_B(word, bit);
				dev_dbg(xpc_part, "interrupt from nasid %ld\n",
					nasid);
				xpc_identify_activate_IRQ_req_sn2(nasid);
			}
		}
	}
	return n_IRQs_detected;
}

static void
xpc_process_activate_IRQ_rcvd_sn2(int n_IRQs_expected)
{
	int n_IRQs_detected;

	n_IRQs_detected = xpc_identify_activate_IRQ_sender_sn2();
	if (n_IRQs_detected < n_IRQs_expected) {
		/* retry once to help avoid missing amo */
		(void)xpc_identify_activate_IRQ_sender_sn2();
	}
}
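/*
 * Note (illustrative, not from the original source): each activate amo word
 * covers 64 senders.  XPC_NASID_W_INDEX() and XPC_NASID_B_INDEX() map a
 * sending nasid to a (word, bit) pair, and XPC_NASID_FROM_W_B() is the
 * inverse used above, so for any sending nasid n the following holds:
 *
 *	XPC_NASID_FROM_W_B(XPC_NASID_W_INDEX(n), XPC_NASID_B_INDEX(n)) == n
 */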
/*
 * Setup the infrastructure necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
static enum xp_retval
xpc_setup_infrastructure_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	enum xp_retval retval;
	int ret;
	int cpuid;
	int ch_number;
	struct xpc_channel *ch;
	struct timer_list *timer;
	short partid = XPC_PARTID(part);

	/*
	 * Allocate all of the channel structures as a contiguous chunk of
	 * memory.
	 */
	DBUG_ON(part->channels != NULL);
	part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS,
				 GFP_KERNEL);
	if (part->channels == NULL) {
		dev_err(xpc_chan, "can't get memory for channels\n");
		return xpNoMemory;
	}

	/* allocate all the required GET/PUT values */

	part_sn2->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
							    GFP_KERNEL,
							    &part_sn2->
							    local_GPs_base);
	if (part_sn2->local_GPs == NULL) {
		dev_err(xpc_chan, "can't get memory for local get/put "
			"values\n");
		retval = xpNoMemory;
		goto out_1;
	}

	part_sn2->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
							     GFP_KERNEL,
							     &part_sn2->
							     remote_GPs_base);
	if (part_sn2->remote_GPs == NULL) {
		dev_err(xpc_chan, "can't get memory for remote get/put "
			"values\n");
		retval = xpNoMemory;
		goto out_2;
	}

	part_sn2->remote_GPs_pa = 0;

	/* allocate all the required open and close args */

	part->local_openclose_args =
	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
					  &part->local_openclose_args_base);
	if (part->local_openclose_args == NULL) {
		dev_err(xpc_chan, "can't get memory for local connect args\n");
		retval = xpNoMemory;
		goto out_3;
	}

	part->remote_openclose_args =
	    xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, GFP_KERNEL,
					  &part->remote_openclose_args_base);
	if (part->remote_openclose_args == NULL) {
		dev_err(xpc_chan, "can't get memory for remote connect args\n");
		retval = xpNoMemory;
		goto out_4;
	}

	part_sn2->remote_openclose_args_pa = 0;

	part_sn2->local_chctl_amo_va = xpc_init_IRQ_amo_sn2(partid);
	part->chctl.all_flags = 0;
	spin_lock_init(&part->chctl_lock);

	part_sn2->notify_IRQ_nasid = 0;
	part_sn2->notify_IRQ_phys_cpuid = 0;
	part_sn2->remote_chctl_amo_va = NULL;

	atomic_set(&part->channel_mgr_requests, 1);
	init_waitqueue_head(&part->channel_mgr_wq);

	sprintf(part_sn2->notify_IRQ_owner, "xpc%02d", partid);
	ret = request_irq(SGI_XPC_NOTIFY, xpc_handle_notify_IRQ_sn2,
			  IRQF_SHARED, part_sn2->notify_IRQ_owner,
			  (void *)(u64)partid);
	if (ret != 0) {
		dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
			"errno=%d\n", -ret);
		retval = xpLackOfResources;
		goto out_5;
	}

	/* Setup a timer to check for dropped notify IRQs */
	timer = &part_sn2->dropped_notify_IRQ_timer;
	init_timer(timer);
	timer->function =
	    (void (*)(unsigned long))xpc_check_for_dropped_notify_IRQ_sn2;
	timer->data = (unsigned long)part;
	timer->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
	add_timer(timer);

	part->nchannels = XPC_MAX_NCHANNELS;

	atomic_set(&part->nchannels_active, 0);
	atomic_set(&part->nchannels_engaged, 0);

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		ch->partid = partid;
		ch->number = ch_number;
		ch->flags = XPC_C_DISCONNECTED;

		ch->sn.sn2.local_GP = &part_sn2->local_GPs[ch_number];
		ch->local_openclose_args =
		    &part->local_openclose_args[ch_number];

		atomic_set(&ch->kthreads_assigned, 0);
		atomic_set(&ch->kthreads_idle, 0);
		atomic_set(&ch->kthreads_active, 0);

		atomic_set(&ch->references, 0);
		atomic_set(&ch->n_to_notify, 0);

		spin_lock_init(&ch->lock);
		mutex_init(&ch->sn.sn2.msg_to_pull_mutex);
		init_completion(&ch->wdisconnect_wait);

		atomic_set(&ch->n_on_msg_allocate_wq, 0);
		init_waitqueue_head(&ch->msg_allocate_wq);
		init_waitqueue_head(&ch->idle_wq);
	}

	/*
	 * With the setting of the partition setup_state to XPC_P_SETUP, we're
	 * declaring that this partition is ready to go.
	 */
	part->setup_state = XPC_P_SETUP;

	/*
	 * Setup the per partition specific variables required by the
	 * remote partition to establish channel connections with us.
	 *
	 * The setting of the magic # indicates that these per partition
	 * specific variables are ready to be used.
	 */
	xpc_vars_part[partid].GPs_pa = __pa(part_sn2->local_GPs);
	xpc_vars_part[partid].openclose_args_pa =
	    __pa(part->local_openclose_args);
	xpc_vars_part[partid].chctl_amo_pa = __pa(part_sn2->local_chctl_amo_va);
	cpuid = raw_smp_processor_id();	/* any CPU in this partition will do */
	xpc_vars_part[partid].notify_IRQ_nasid = cpuid_to_nasid(cpuid);
	xpc_vars_part[partid].notify_IRQ_phys_cpuid = cpu_physical_id(cpuid);
	xpc_vars_part[partid].nchannels = part->nchannels;
	xpc_vars_part[partid].magic = XPC_VP_MAGIC1;

	return xpSuccess;

	/* setup of infrastructure failed */
out_5:
	free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);
out_4:
	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
out_3:
	kfree(part->local_openclose_args_base);
	part->local_openclose_args = NULL;
out_2:
	kfree(part_sn2->remote_GPs_base);
	part_sn2->remote_GPs = NULL;
out_1:
	kfree(part_sn2->local_GPs_base);
	part_sn2->local_GPs = NULL;
	kfree(part->channels);
	part->channels = NULL;
	return retval;
}
/*
 * Teardown the infrastructure necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
static void
xpc_teardown_infrastructure_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	short partid = XPC_PARTID(part);

	/*
	 * We start off by making this partition inaccessible to local
	 * processes by marking it as no longer setup. Then we make it
	 * inaccessible to remote processes by clearing the XPC per partition
	 * specific variable's magic # (which indicates that these variables
	 * are no longer valid) and by ignoring all XPC notify IRQs sent to
	 * this partition.
	 */

	DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
	DBUG_ON(atomic_read(&part->nchannels_active) != 0);
	DBUG_ON(part->setup_state != XPC_P_SETUP);
	part->setup_state = XPC_P_WTEARDOWN;

	xpc_vars_part[partid].magic = 0;

	free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);

	/*
	 * Before proceeding with the teardown we have to wait until all
	 * existing references cease.
	 */
	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));

	/* now we can begin tearing down the infrastructure */

	part->setup_state = XPC_P_TORNDOWN;

	/* in case we've still got outstanding timers registered... */
	del_timer_sync(&part_sn2->dropped_notify_IRQ_timer);

	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
	kfree(part->local_openclose_args_base);
	part->local_openclose_args = NULL;
	kfree(part_sn2->remote_GPs_base);
	part_sn2->remote_GPs = NULL;
	kfree(part_sn2->local_GPs_base);
	part_sn2->local_GPs = NULL;
	kfree(part->channels);
	part->channels = NULL;
	part_sn2->local_chctl_amo_va = NULL;
}
/*
 * Create a wrapper that hides the underlying mechanism for pulling a cacheline
 * (or multiple cachelines) from a remote partition.
 *
 * src must be a cacheline aligned physical address on the remote partition.
 * dst must be a cacheline aligned virtual address on this partition.
 * cnt must be cacheline sized
 */
/* >>> Replace this function by call to xp_remote_memcpy() or bte_copy()? */
static enum xp_retval
xpc_pull_remote_cachelines_sn2(struct xpc_partition *part, void *dst,
			       const void *src, size_t cnt)
{
	enum xp_retval ret;

	DBUG_ON((u64)src != L1_CACHE_ALIGN((u64)src));
	DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst));
	DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));

	if (part->act_state == XPC_P_DEACTIVATING)
		return part->reason;

	ret = xp_remote_memcpy(dst, src, cnt);
	if (ret != xpSuccess) {
		dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed,"
			" ret=%d\n", XPC_PARTID(part), ret);
	}
	return ret;
}
/*
 * Pull the remote per partition specific variables from the specified
 * partition.
 */
static enum xp_retval
xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	u8 buffer[L1_CACHE_BYTES * 2];
	struct xpc_vars_part_sn2 *pulled_entry_cacheline =
	    (struct xpc_vars_part_sn2 *)L1_CACHE_ALIGN((u64)buffer);
	struct xpc_vars_part_sn2 *pulled_entry;
	u64 remote_entry_cacheline_pa, remote_entry_pa;
	short partid = XPC_PARTID(part);
	enum xp_retval ret;

	/* pull the cacheline that contains the variables we're interested in */

	DBUG_ON(part_sn2->remote_vars_part_pa !=
		L1_CACHE_ALIGN(part_sn2->remote_vars_part_pa));
	DBUG_ON(sizeof(struct xpc_vars_part_sn2) != L1_CACHE_BYTES / 2);

	remote_entry_pa = part_sn2->remote_vars_part_pa +
	    sn_partition_id * sizeof(struct xpc_vars_part_sn2);

	remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));

	pulled_entry = (struct xpc_vars_part_sn2 *)((u64)pulled_entry_cacheline
						    + (remote_entry_pa &
						       (L1_CACHE_BYTES - 1)));

	ret = xpc_pull_remote_cachelines_sn2(part, pulled_entry_cacheline,
					     (void *)remote_entry_cacheline_pa,
					     L1_CACHE_BYTES);
	if (ret != xpSuccess) {
		dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
			"partition %d, ret=%d\n", partid, ret);
		return ret;
	}

	/* see if they've been set up yet */

	if (pulled_entry->magic != XPC_VP_MAGIC1 &&
	    pulled_entry->magic != XPC_VP_MAGIC2) {

		if (pulled_entry->magic != 0) {
			dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
				"partition %d has bad magic value (=0x%lx)\n",
				partid, sn_partition_id, pulled_entry->magic);
			return xpBadMagic;
		}

		/* they've not been initialized yet */
		return xpRetry;
	}

	if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) {

		/* validate the variables */

		if (pulled_entry->GPs_pa == 0 ||
		    pulled_entry->openclose_args_pa == 0 ||
		    pulled_entry->chctl_amo_pa == 0) {

			dev_err(xpc_chan, "partition %d's XPC vars_part for "
				"partition %d are not valid\n", partid,
				sn_partition_id);
			return xpInvalidAddress;
		}

		/* the variables we imported look to be valid */

		part_sn2->remote_GPs_pa = pulled_entry->GPs_pa;
		part_sn2->remote_openclose_args_pa =
		    pulled_entry->openclose_args_pa;
		part_sn2->remote_chctl_amo_va =
		    (struct amo *)__va(pulled_entry->chctl_amo_pa);
		part_sn2->notify_IRQ_nasid = pulled_entry->notify_IRQ_nasid;
		part_sn2->notify_IRQ_phys_cpuid =
		    pulled_entry->notify_IRQ_phys_cpuid;

		if (part->nchannels > pulled_entry->nchannels)
			part->nchannels = pulled_entry->nchannels;

		/* let the other side know that we've pulled their variables */

		xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
	}

	if (pulled_entry->magic == XPC_VP_MAGIC1)
		return xpRetry;

	return xpSuccess;
}
/*
 * Establish first contact with the remote partition. This involves pulling
 * the XPC per partition variables from the remote partition and waiting for
 * the remote partition to pull ours.
 */
static enum xp_retval
xpc_make_first_contact_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	enum xp_retval ret;

	/*
	 * Register the remote partition's amos with SAL so it can handle
	 * and cleanup errors within that address range should the remote
	 * partition go down. We don't unregister this range because it is
	 * difficult to tell when outstanding writes to the remote partition
	 * are finished and thus when it is safe to unregister. This should
	 * not result in wasted space in the SAL xp_addr_region table because
	 * we should get the same page for remote_amos_page_pa after module
	 * reloads and system reboots.
	 */
	if (sn_register_xp_addr_region(part_sn2->remote_amos_page_pa,
				       PAGE_SIZE, 1) < 0) {
		dev_warn(xpc_part, "xpc_activating(%d) failed to register "
			 "xp_addr region\n", XPC_PARTID(part));

		ret = xpPhysAddrRegFailed;
		XPC_DEACTIVATE_PARTITION(part, ret);
		return ret;
	}

	/*
	 * Send activate IRQ to get other side to activate if they've not
	 * already begun to do so.
	 */
	xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
				  cnodeid_to_nasid(0),
				  part_sn2->activate_IRQ_nasid,
				  part_sn2->activate_IRQ_phys_cpuid);

	while ((ret = xpc_pull_remote_vars_part_sn2(part)) != xpSuccess) {
		if (ret != xpRetry) {
			XPC_DEACTIVATE_PARTITION(part, ret);
			return ret;
		}

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}
/*
 * Get the chctl flags and pull the openclose args and/or remote GPs as needed.
 */
static u64
xpc_get_chctl_all_flags_sn2(struct xpc_partition *part)
{
	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;
	enum xp_retval ret;

	/*
	 * See if there are any chctl flags to be handled.
	 */

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	if (xpc_any_openclose_chctl_flags_set(&chctl)) {
		ret = xpc_pull_remote_cachelines_sn2(part, part->
						     remote_openclose_args,
						     (void *)part_sn2->
						     remote_openclose_args_pa,
						     XPC_OPENCLOSE_ARGS_SIZE);
		if (ret != xpSuccess) {
			XPC_DEACTIVATE_PARTITION(part, ret);

			dev_dbg(xpc_chan, "failed to pull openclose args from "
				"partition %d, ret=%d\n", XPC_PARTID(part),
				ret);

			/* don't bother processing chctl flags anymore */
			chctl.all_flags = 0;
		}
	}

	if (xpc_any_msg_chctl_flags_set(&chctl)) {
		ret = xpc_pull_remote_cachelines_sn2(part, part_sn2->remote_GPs,
						     (void *)part_sn2->remote_GPs_pa,
						     XPC_GP_SIZE);
		if (ret != xpSuccess) {
			XPC_DEACTIVATE_PARTITION(part, ret);

			dev_dbg(xpc_chan, "failed to pull GPs from partition "
				"%d, ret=%d\n", XPC_PARTID(part), ret);

			/* don't bother processing chctl flags anymore */
			chctl.all_flags = 0;
		}
	}

	return chctl.all_flags;
}
/*
 * Notify those who wanted to be notified upon delivery of their message.
 */
static void
xpc_notify_senders_sn2(struct xpc_channel *ch, enum xp_retval reason, s64 put)
{
	struct xpc_notify *notify;
	u8 notify_type;
	s64 get = ch->sn.sn2.w_remote_GP.get - 1;

	while (++get < put && atomic_read(&ch->n_to_notify) > 0) {

		notify = &ch->notify_queue[get % ch->local_nentries];

		/*
		 * See if the notify entry indicates it was associated with
		 * a message whose sender wants to be notified. It is possible
		 * that it is, but someone else is doing or has done the
		 * notification.
		 */
		notify_type = notify->type;
		if (notify_type == 0 ||
		    cmpxchg(&notify->type, notify_type, 0) != notify_type) {
			continue;
		}

		DBUG_ON(notify_type != XPC_N_CALL);

		atomic_dec(&ch->n_to_notify);

		if (notify->func != NULL) {
			dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
				"msg_number=%ld, partid=%d, channel=%d\n",
				(void *)notify, get, ch->partid, ch->number);

			notify->func(reason, ch->partid, ch->number,
				     notify->key);

			dev_dbg(xpc_chan, "notify->func() returned, "
				"notify=0x%p, msg_number=%ld, partid=%d, "
				"channel=%d\n", (void *)notify, get,
				ch->partid, ch->number);
		}
	}
}
static void
xpc_notify_senders_of_disconnect_sn2(struct xpc_channel *ch)
{
	xpc_notify_senders_sn2(ch, ch->reason, ch->sn.sn2.w_local_GP.put);
}
/*
 * Clear some of the msg flags in the local message queue.
 */
static inline void
xpc_clear_local_msgqueue_flags_sn2(struct xpc_channel *ch)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg *msg;
	s64 get;

	get = ch_sn2->w_remote_GP.get;
	do {
		msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
					 (get % ch->local_nentries) *
					 ch->msg_size);
		msg->flags = 0;
	} while (++get < ch_sn2->remote_GP.get);
}

/*
 * Clear some of the msg flags in the remote message queue.
 */
static inline void
xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg *msg;
	s64 put;

	put = ch_sn2->w_remote_GP.put;
	do {
		msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
					 (put % ch->remote_nentries) *
					 ch->msg_size);
		msg->flags = 0;
	} while (++put < ch_sn2->remote_GP.put);
}
static void
xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	int nmsgs_sent;

	ch_sn2->remote_GP = part->sn.sn2.remote_GPs[ch_number];

	/* See what, if anything, has changed for each connected channel */

	xpc_msgqueue_ref(ch);

	if (ch_sn2->w_remote_GP.get == ch_sn2->remote_GP.get &&
	    ch_sn2->w_remote_GP.put == ch_sn2->remote_GP.put) {
		/* nothing changed since GPs were last pulled */
		xpc_msgqueue_deref(ch);
		return;
	}

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/*
	 * First check to see if messages recently sent by us have been
	 * received by the other side. (The remote GET value will have
	 * changed since we last looked at it.)
	 */

	if (ch_sn2->w_remote_GP.get != ch_sn2->remote_GP.get) {

		/*
		 * We need to notify any senders that want to be notified
		 * that their sent messages have been received by their
		 * intended recipients. We need to do this before updating
		 * w_remote_GP.get so that we don't allocate the same message
		 * queue entries prematurely (see xpc_allocate_msg()).
		 */
		if (atomic_read(&ch->n_to_notify) > 0) {
			/*
			 * Notify senders that messages sent have been
			 * received and delivered by the other side.
			 */
			xpc_notify_senders_sn2(ch, xpMsgDelivered,
					       ch_sn2->remote_GP.get);
		}

		/*
		 * Clear msg->flags in previously sent messages, so that
		 * they're ready for xpc_allocate_msg().
		 */
		xpc_clear_local_msgqueue_flags_sn2(ch);

		ch_sn2->w_remote_GP.get = ch_sn2->remote_GP.get;

		dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
			"channel=%d\n", ch_sn2->w_remote_GP.get, ch->partid,
			ch->number);

		/*
		 * If anyone was waiting for message queue entries to become
		 * available, wake them up.
		 */
		if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
			wake_up(&ch->msg_allocate_wq);
	}

	/*
	 * Now check for newly sent messages by the other side. (The remote
	 * PUT value will have changed since we last looked at it.)
	 */

	if (ch_sn2->w_remote_GP.put != ch_sn2->remote_GP.put) {
		/*
		 * Clear msg->flags in previously received messages, so that
		 * they're ready for xpc_get_deliverable_msg().
		 */
		xpc_clear_remote_msgqueue_flags_sn2(ch);

		ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put;

		dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
			"channel=%d\n", ch_sn2->w_remote_GP.put, ch->partid,
			ch->number);

		nmsgs_sent = ch_sn2->w_remote_GP.put - ch_sn2->w_local_GP.get;
		if (nmsgs_sent > 0) {
			dev_dbg(xpc_chan, "msgs waiting to be copied and "
				"delivered=%d, partid=%d, channel=%d\n",
				nmsgs_sent, ch->partid, ch->number);

			if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
				xpc_activate_kthreads(ch, nmsgs_sent);
		}
	}

	xpc_msgqueue_deref(ch);
}
static struct xpc_msg *
xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg *remote_msg, *msg;
	u32 msg_index, nmsgs;
	u64 msg_offset;
	enum xp_retval ret;

	if (mutex_lock_interruptible(&ch_sn2->msg_to_pull_mutex) != 0) {
		/* we were interrupted by a signal */
		return NULL;
	}

	while (get >= ch_sn2->next_msg_to_pull) {

		/* pull as many messages as are ready and able to be pulled */

		msg_index = ch_sn2->next_msg_to_pull % ch->remote_nentries;

		DBUG_ON(ch_sn2->next_msg_to_pull >= ch_sn2->w_remote_GP.put);
		nmsgs = ch_sn2->w_remote_GP.put - ch_sn2->next_msg_to_pull;
		if (msg_index + nmsgs > ch->remote_nentries) {
			/* ignore the ones that wrap the msg queue for now */
			nmsgs = ch->remote_nentries - msg_index;
		}

		msg_offset = msg_index * ch->msg_size;
		msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
		remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa +
						msg_offset);

		ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg,
						     nmsgs * ch->msg_size);
		if (ret != xpSuccess) {

			dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
				" msg %ld from partition %d, channel=%d, "
				"ret=%d\n", nmsgs, ch_sn2->next_msg_to_pull,
				ch->partid, ch->number, ret);

			XPC_DEACTIVATE_PARTITION(part, ret);

			mutex_unlock(&ch_sn2->msg_to_pull_mutex);
			return NULL;
		}

		ch_sn2->next_msg_to_pull += nmsgs;
	}

	mutex_unlock(&ch_sn2->msg_to_pull_mutex);

	/* return the message we were looking for */
	msg_offset = (get % ch->remote_nentries) * ch->msg_size;
	msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);

	return msg;
}
static int
xpc_n_of_deliverable_msgs_sn2(struct xpc_channel *ch)
{
	return ch->sn.sn2.w_remote_GP.put - ch->sn.sn2.w_local_GP.get;
}
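/*
 * Worked example (assumed numbers, for illustration only): the GET/PUT values
 * are monotonically increasing s64 counters; only "value % nentries" selects
 * a slot in the circular message queue.  With w_remote_GP.put = 12 and
 * w_local_GP.get = 9 there are 12 - 9 = 3 deliverable messages, occupying
 * slots 9 % nentries, 10 % nentries and 11 % nentries of remote_msgqueue.
 */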
/*
 * Get a message to be delivered.
 */
static struct xpc_msg *
xpc_get_deliverable_msg_sn2(struct xpc_channel *ch)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg *msg = NULL;
	s64 get;

	do {
		if (ch->flags & XPC_C_DISCONNECTING)
			break;

		get = ch_sn2->w_local_GP.get;
		rmb();	/* guarantee that .get loads before .put */
		if (get == ch_sn2->w_remote_GP.put)
			break;

		/* There are messages waiting to be pulled and delivered.
		 * We need to try to secure one for ourselves. We'll do this
		 * by trying to increment w_local_GP.get and hope that no one
		 * else beats us to it. If they do, we'll simply have
		 * to try again for the next one.
		 */

		if (cmpxchg(&ch_sn2->w_local_GP.get, get, get + 1) == get) {
			/* we got the entry referenced by get */

			dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, "
				"partid=%d, channel=%d\n", get + 1,
				ch->partid, ch->number);

			/* pull the message from the remote partition */

			msg = xpc_pull_remote_msg_sn2(ch, get);

			DBUG_ON(msg != NULL && msg->number != get);
			DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE));
			DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY));

			break;
		}

	} while (1);

	return msg;
}
/*
 * Now we actually send the messages that are ready to be sent by advancing
 * the local message queue's Put value and then send a chctl msgrequest to the
 * recipient partition.
 */
static void
xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg *msg;
	s64 put = initial_put + 1;
	int send_msgrequest = 0;

	while (1) {

		while (1) {
			if (put == ch_sn2->w_local_GP.put)
				break;

			msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
						 (put % ch->local_nentries) *
						 ch->msg_size);

			if (!(msg->flags & XPC_M_READY))
				break;

			put++;
		}

		if (put == initial_put) {
			/* nothing's changed */
			break;
		}

		if (cmpxchg_rel(&ch_sn2->local_GP->put, initial_put, put) !=
		    initial_put) {
			/* someone else beat us to it */
			DBUG_ON(ch_sn2->local_GP->put < initial_put);
			break;
		}

		/* we just set the new value of local_GP->put */

		dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, "
			"channel=%d\n", put, ch->partid, ch->number);

		send_msgrequest = 1;

		/*
		 * We need to ensure that the message referenced by
		 * local_GP->put is not XPC_M_READY or that local_GP->put
		 * equals w_local_GP.put, so we'll go have a look.
		 */
		initial_put = put;
	}

	if (send_msgrequest)
		xpc_send_chctl_msgrequest_sn2(ch);
}
/*
 * Allocate an entry for a message from the message queue associated with the
 * specified channel.
 */
static enum xp_retval
xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
		     struct xpc_msg **address_of_msg)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg *msg;
	enum xp_retval ret;
	s64 put;

	/*
	 * Get the next available message entry from the local message queue.
	 * If none are available, we'll make sure that we grab the latest
	 * GP values.
	 */
	ret = xpTimeout;

	while (1) {

		put = ch_sn2->w_local_GP.put;
		rmb();	/* guarantee that .put loads before .get */
		if (put - ch_sn2->w_remote_GP.get < ch->local_nentries) {

			/* There are available message entries. We need to try
			 * to secure one for ourselves. We'll do this by trying
			 * to increment w_local_GP.put as long as someone else
			 * doesn't beat us to it. If they do, we'll have to
			 * try again.
			 */
			if (cmpxchg(&ch_sn2->w_local_GP.put, put, put + 1) ==
			    put) {
				/* we got the entry referenced by put */
				break;
			}
			continue;	/* try again */
		}

		/*
		 * There aren't any available msg entries at this time.
		 *
		 * In waiting for a message entry to become available,
		 * we set a timeout in case the other side is not sending
		 * completion interrupts. This lets us fake a notify IRQ
		 * that will cause the notify IRQ handler to fetch the latest
		 * GP values as if an interrupt was sent by the other side.
		 */
		if (ret == xpTimeout)
			xpc_send_chctl_local_msgrequest_sn2(ch);

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	/* get the message's address and initialize it */
	msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
				 (put % ch->local_nentries) * ch->msg_size);

	DBUG_ON(msg->flags != 0);
	msg->number = put;

	dev_dbg(xpc_chan, "w_local_GP.put changed to %ld; msg=0x%p, "
		"msg_number=%ld, partid=%d, channel=%d\n", put + 1,
		(void *)msg, msg->number, ch->partid, ch->number);

	*address_of_msg = msg;
	return xpSuccess;
}
/*
 * Common code that does the actual sending of the message by advancing the
 * local message queue's Put value and sends a chctl msgrequest to the
 * partition the message is being sent to.
 */
static enum xp_retval
xpc_send_msg_sn2(struct xpc_channel *ch, u32 flags, void *payload,
		 u16 payload_size, u8 notify_type, xpc_notify_func func,
		 void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_msg *msg = msg;
	struct xpc_notify *notify = notify;
	s64 msg_number;
	s64 put;

	DBUG_ON(notify_type == XPC_N_CALL && func == NULL);

	if (XPC_MSG_SIZE(payload_size) > ch->msg_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_sn2(ch, flags, &msg);
	if (ret != xpSuccess)
		goto out_1;

	msg_number = msg->number;

	if (notify_type != 0) {
		/*
		 * Tell the remote side to send an ACK interrupt when the
		 * message has been delivered.
		 */
		msg->flags |= XPC_M_INTERRUPT;

		atomic_inc(&ch->n_to_notify);

		notify = &ch->notify_queue[msg_number % ch->local_nentries];
		notify->func = func;
		notify->key = key;
		notify->type = notify_type;

		/* >>> is a mb() needed here? */

		if (ch->flags & XPC_C_DISCONNECTING) {
			/*
			 * An error occurred between our last error check and
			 * this one. We will try to clear the type field from
			 * the notify entry. If we succeed then
			 * xpc_disconnect_channel() didn't already process
			 * the notify entry.
			 */
			if (cmpxchg(&notify->type, notify_type, 0) ==
			    notify_type) {
				atomic_dec(&ch->n_to_notify);
				ret = ch->reason;
			}
			goto out_1;
		}
	}

	memcpy(&msg->payload, payload, payload_size);

	msg->flags |= XPC_M_READY;

	/*
	 * The preceding store of msg->flags must occur before the following
	 * load of local_GP->put.
	 */
	mb();

	/* see if the message is next in line to be sent, if so send it */

	put = ch->sn.sn2.local_GP->put;
	if (put == msg_number)
		xpc_send_msgs_sn2(ch, put);

out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}
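/*
 * Illustrative summary of the send path above (not additional driver code):
 * a slot is reserved by advancing w_local_GP.put, the payload is copied in,
 * XPC_M_READY is set, and only then is local_GP->put published -- and only
 * when the message is the next one in line, since xpc_send_msgs_sn2() stops
 * advancing at the first entry that is not yet XPC_M_READY.  The mb() keeps
 * the XPC_M_READY store ordered before the local_GP->put load.
 */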
/*
 * Now we actually acknowledge the messages that have been delivered and ack'd
 * by advancing the cached remote message queue's Get value and if requested
 * send a chctl msgrequest to the message sender's partition.
 */
static void
xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
{
	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
	struct xpc_msg *msg;
	s64 get = initial_get + 1;
	int send_msgrequest = 0;

	while (1) {

		while (1) {
			if (get == ch_sn2->w_local_GP.get)
				break;

			msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
						 (get % ch->remote_nentries) *
						 ch->msg_size);

			if (!(msg->flags & XPC_M_DONE))
				break;

			msg_flags |= msg->flags;
			get++;
		}

		if (get == initial_get) {
			/* nothing's changed */
			break;
		}

		if (cmpxchg_rel(&ch_sn2->local_GP->get, initial_get, get) !=
		    initial_get) {
			/* someone else beat us to it */
			DBUG_ON(ch_sn2->local_GP->get <= initial_get);
			break;
		}

		/* we just set the new value of local_GP->get */

		dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, "
			"channel=%d\n", get, ch->partid, ch->number);

		send_msgrequest = (msg_flags & XPC_M_INTERRUPT);

		/*
		 * We need to ensure that the message referenced by
		 * local_GP->get is not XPC_M_DONE or that local_GP->get
		 * equals w_local_GP.get, so we'll go have a look.
		 */
		initial_get = get;
	}

	if (send_msgrequest)
		xpc_send_chctl_msgrequest_sn2(ch);
}
static void
xpc_received_msg_sn2(struct xpc_channel *ch, struct xpc_msg *msg)
{
	s64 get;
	s64 msg_number = msg->number;

	dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n",
		(void *)msg, msg_number, ch->partid, ch->number);

	DBUG_ON((((u64)msg - (u64)ch->remote_msgqueue) / ch->msg_size) !=
		msg_number % ch->remote_nentries);
	DBUG_ON(msg->flags & XPC_M_DONE);

	msg->flags |= XPC_M_DONE;

	/*
	 * The preceding store of msg->flags must occur before the following
	 * load of local_GP->get.
	 */
	mb();

	/*
	 * See if this message is next in line to be acknowledged as having
	 * been delivered.
	 */
	get = ch->sn.sn2.local_GP->get;
	if (get == msg_number)
		xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
}
int
xpc_init_sn2(void)
{
	int ret;

	xpc_rsvd_page_init = xpc_rsvd_page_init_sn2;
	xpc_increment_heartbeat = xpc_increment_heartbeat_sn2;
	xpc_offline_heartbeat = xpc_offline_heartbeat_sn2;
	xpc_online_heartbeat = xpc_online_heartbeat_sn2;
	xpc_heartbeat_init = xpc_heartbeat_init_sn2;
	xpc_heartbeat_exit = xpc_heartbeat_exit_sn2;
	xpc_check_remote_hb = xpc_check_remote_hb_sn2;

	xpc_request_partition_activation = xpc_request_partition_activation_sn2;
	xpc_request_partition_reactivation =
	    xpc_request_partition_reactivation_sn2;
	xpc_request_partition_deactivation =
	    xpc_request_partition_deactivation_sn2;
	xpc_cancel_partition_deactivation_request =
	    xpc_cancel_partition_deactivation_request_sn2;

	xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2;
	xpc_setup_infrastructure = xpc_setup_infrastructure_sn2;
	xpc_teardown_infrastructure = xpc_teardown_infrastructure_sn2;
	xpc_make_first_contact = xpc_make_first_contact_sn2;
	xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_sn2;
	xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2;
	xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2;
	xpc_n_of_deliverable_msgs = xpc_n_of_deliverable_msgs_sn2;
	xpc_get_deliverable_msg = xpc_get_deliverable_msg_sn2;

	xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_sn2;
	xpc_partition_engaged = xpc_partition_engaged_sn2;
	xpc_any_partition_engaged = xpc_any_partition_engaged_sn2;
	xpc_indicate_partition_disengaged =
	    xpc_indicate_partition_disengaged_sn2;
	xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_sn2;

	xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_sn2;
	xpc_send_chctl_closereply = xpc_send_chctl_closereply_sn2;
	xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_sn2;
	xpc_send_chctl_openreply = xpc_send_chctl_openreply_sn2;

	xpc_send_msg = xpc_send_msg_sn2;
	xpc_received_msg = xpc_received_msg_sn2;

	/* open up protections for IPI and [potentially] amo operations */
	xpc_allow_IPI_ops_sn2();
	xpc_allow_amo_ops_shub_wars_1_1_sn2();

	/*
	 * This is safe to do before the xpc_hb_checker thread has started
	 * because the handler releases a wait queue. If an interrupt is
	 * received before the thread is waiting, it will not go to sleep,
	 * but rather immediately process the interrupt.
	 */
	ret = request_irq(SGI_XPC_ACTIVATE, xpc_handle_activate_IRQ_sn2, 0,
			  "xpc hb", NULL);
	if (ret != 0) {
		dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
			"errno=%d\n", -ret);
		xpc_disallow_IPI_ops_sn2();
	}
	return ret;
}

void
xpc_exit_sn2(void)
{
	free_irq(SGI_XPC_ACTIVATE, NULL);
	xpc_disallow_IPI_ops_sn2();
}