/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 *	Architecture specific implementation of common functions.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"

static atomic64_t xpc_heartbeat_uv;
static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);

#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"

#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"

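/*
 * Two GRU message queues are created at module load time: the activate mq
 * carries partition activation, heartbeat and channel-control traffic, while
 * the notify mq carries the actual channel payloads and their ACKs.
 */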
static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;

static int
xpc_setup_partitions_sn_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}
	return 0;
}

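/*
 * Tie a GRU message queue to an irq/vector so that a write to it by a remote
 * partition raises an interrupt locally: uv_setup_irq() on x86_64, or a fixed
 * SGI vector programmed into a global MMR on ia64.
 */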
static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
#if defined CONFIG_X86_64
	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset);
	if (mq->irq < 0) {
		dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
			mq->irq);
		return mq->irq;
	}

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode;
	unsigned long mmr_value;

	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_ACTIVATE;
	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_NOTIFY;
	else
		return -EINVAL;

	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
	mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;

	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
	#error not a supported configuration
#endif

	return 0;
}

static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
	uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode;
	unsigned long mmr_value;

	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
	mmr_value = 1UL << 16;

	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
	#error not a supported configuration
#endif
}

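/*
 * A GRU message queue must be registered on a BIOS/SAL "watchlist" so that a
 * write to it by another partition generates an interrupt on this one; the
 * watchlist slot is released again when the queue is destroyed.
 */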
static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_alloc(mq->mmr_blade, uv_gpa(mq->address),
				    mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
			ret);
		return -EBUSY;
	}
#else
	#error not a supported configuration
#endif

	mq->watchlist_num = ret;
	return 0;
}

static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
	BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
	BUG_ON(ret != SALRET_OK);
#else
	#error not a supported configuration
#endif
}

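/*
 * Allocate node-local, page-aligned memory for a GRU message queue, create
 * the queue, register it on the watchlist, wire up its irq handler and open
 * its pages to the other partitions.  Errors unwind in reverse order.
 */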
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	ret = gru_create_message_queue(mq->address, mq_size);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_3;
	}

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq);
out_1:
	return ERR_PTR(ret);
}

static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	int ret;

	/* disallow other partitions to access GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister irq handler and release mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable generation of irq when GRU mq op occurs to this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq);
}

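/*
 * Deliver a message into a remote partition's GRU message queue, retrying
 * (after a short sleep when the queue is full) until the send succeeds or an
 * unexpected GRU error is returned.
 */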
static enum xp_retval
xpc_send_gru_msg(unsigned long mq_gpa, void *msg, size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(mq_gpa, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}

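/*
 * Run from the heartbeat checker: act on any activate/reactivate/deactivate
 * requests that the activate mq irq handler has queued up per partition.
 */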
static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}

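/*
 * Handle one message pulled off the activate mq: heartbeat updates,
 * (de)activation requests, channel control flags and engagement marks.
 * *wakeup_hb_checker is bumped whenever the heartbeat checker has work to do.
 */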
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV: {
		struct xpc_activate_mq_msg_heartbeat_req_uv *msg;

		msg = container_of(msg_hdr,
				   struct xpc_activate_mq_msg_heartbeat_req_uv,
				   hdr);
		part_uv->heartbeat = msg->heartbeat;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV: {
		struct xpc_activate_mq_msg_heartbeat_req_uv *msg;

		msg = container_of(msg_hdr,
				   struct xpc_activate_mq_msg_heartbeat_req_uv,
				   hdr);
		part_uv->heartbeat = msg->heartbeat;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_HEARTBEAT_OFFLINE_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV: {
		struct xpc_activate_mq_msg_heartbeat_req_uv *msg;

		msg = container_of(msg_hdr,
				   struct xpc_activate_mq_msg_heartbeat_req_uv,
				   hdr);
		part_uv->heartbeat = msg->heartbeat;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_HEARTBEAT_OFFLINE_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? letting the heartbeat checker deal with it?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
		part_uv->remote_activate_mq_gpa = msg->activate_mq_gpa;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->local_notify_mq_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}

	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}

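/*
 * Interrupt handler for the activate mq: drain all pending messages, dispatch
 * each to xpc_handle_activate_mq_msg_uv() and, if any of them queued work for
 * the heartbeat checker, wake it up.
 */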
static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	int wakeup_hb_checker = 0;

	while (1) {
		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->address);
		if (msg_hdr == NULL)
			break;

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"received invalid partid=0x%x in message\n",
				partid);
		} else {
			part = &xpc_partitions[partid];
			if (xpc_part_ref(part)) {
				xpc_handle_activate_mq_msg_uv(part, msg_hdr,
							    &wakeup_hb_checker);
				xpc_part_deref(part);
			}
		}

		gru_free_message(xpc_activate_mq_uv->address, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}

static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg,
			 size_t msg_size, int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = XPC_PARTID(part);
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	return xpc_send_gru_msg(part->sn.uv.remote_activate_mq_gpa, msg,
				msg_size);
}

static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}

static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}

static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}

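/*
 * Ask BIOS (x86_64) or SAL (ia64) for the physical address of a partition's
 * reserved page; the firmware may request additional passes with a larger
 * buffer, which is reported back as xpNeedMoreInfo.
 */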
static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	s64 status;
	enum xp_retval ret;

#if defined CONFIG_X86_64
	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
					  (u64 *)len);
	if (status == BIOS_STATUS_SUCCESS)
		ret = xpSuccess;
	else if (status == BIOS_STATUS_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpBiosError;

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

#else
	#error not a supported configuration
#endif

	return ret;
}

static int
xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp)
{
	rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv->address);
	return 0;
}

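/*
 * Broadcast a heartbeat-type message to every partition we are currently
 * heartbeating to (see the traffic note inside the function body).
 */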
static void
xpc_send_heartbeat_uv(int msg_type)
{
	short partid;
	struct xpc_partition *part;
	struct xpc_activate_mq_msg_heartbeat_req_uv msg;

	/*
	 * !!! On uv we're broadcasting a heartbeat message every 5 seconds.
	 * !!! Whereas on sn2 we're bte_copy'ng the heartbeat info every 20
	 * !!! seconds. This is an increase in numalink traffic.
	 * ??? Is this good?
	 */

	msg.heartbeat = atomic64_inc_return(&xpc_heartbeat_uv);

	partid = find_first_bit(xpc_heartbeating_to_mask_uv,
				XP_MAX_NPARTITIONS_UV);

	while (partid < XP_MAX_NPARTITIONS_UV) {
		part = &xpc_partitions[partid];

		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					      msg_type);

		partid = find_next_bit(xpc_heartbeating_to_mask_uv,
				       XP_MAX_NPARTITIONS_UV, partid + 1);
	}
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV);
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV);
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV);
}

static void
xpc_heartbeat_init_uv(void)
{
	atomic64_set(&xpc_heartbeat_uv, 0);
	bitmap_zero(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
	xpc_heartbeating_to_mask = &xpc_heartbeating_to_mask_uv[0];
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV);
}

static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret = xpNoHeartbeat;

	if (part_uv->remote_act_state != XPC_P_AS_INACTIVE &&
	    part_uv->remote_act_state != XPC_P_AS_DEACTIVATING) {

		if (part_uv->heartbeat != part->last_heartbeat ||
		    (part_uv->flags & XPC_P_HEARTBEAT_OFFLINE_UV)) {

			part->last_heartbeat = part_uv->heartbeat;
			ret = xpSuccess;
		}
	}
	return ret;
}

static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.remote_activate_mq_gpa = remote_rp->sn.activate_mq_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.activate_mq_gpa = xpc_rsvd_page->sn.activate_mq_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {

		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}

static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

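/*
 * Simple spinlock-protected singly linked FIFO used for the per-channel free
 * send-slot list and the list of received-but-undelivered messages.
 */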
static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
	head->first = NULL;
	head->last = NULL;
	spin_lock_init(&head->lock);
	head->n_entries = 0;
}

static void *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
	unsigned long irq_flags;
	struct xpc_fifo_entry_uv *first;

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (head->first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;

		head->n_entries--;
		BUG_ON(head->n_entries < 0);

		first->next = NULL;
	}
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;
}

static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
		      struct xpc_fifo_entry_uv *last)
{
	unsigned long irq_flags;

	last->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	BUG_ON(head->n_entries < 0);
	spin_unlock_irqrestore(&head->lock, irq_flags);
}

static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
	return head->n_entries;
}

/*
 * Setup the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_sn_uv(struct xpc_partition *part)
{
	struct xpc_channel_uv *ch_uv;
	int ch_number;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_uv = &part->channels[ch_number].sn.uv;

		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
	}

	return xpSuccess;
}

/*
 * Teardown the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_sn_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}

static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}

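/*
 * Allocate the send (and, below, receive) message slot arrays for a channel,
 * falling back to progressively fewer entries if memory is tight and
 * shrinking ch->local_nentries/remote_nentries to match.
 */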
static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_send_msg_slot_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->send_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = &ch_uv->send_msg_slots[entry];

			msg_slot->msg_slot_number = entry;
			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
					      &msg_slot->next);
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries)
			ch->local_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
		nbytes = nentries * ch->entry_size;
		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->recv_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = ch_uv->recv_msg_slots +
			    entry * ch->entry_size;

			msg_slot->hdr.msg_slot_number = entry;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries)
			ch->remote_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	static enum xp_retval ret;
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ret = xpc_allocate_send_msg_slot_uv(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_recv_msg_slot_uv(ch);
		if (ret != xpSuccess) {
			kfree(ch_uv->send_msg_slots);
			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		}
	}
	return ret;
}

/*
 * Free up msg_slots and clear other stuff that were setup for the specified
 * channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(!spin_is_locked(&ch->lock));

	ch_uv->remote_notify_mq_gpa = 0;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}

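/*
 * Channel control messages (open/close request/reply) travel over the
 * activate mq rather than the notify mq; each sender below just fills in the
 * message and hands it to xpc_send_activate_IRQ_ch_uv().
 */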
static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.entry_size = ch->entry_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.local_notify_mq_gpa = uv_gpa(xpc_notify_mq_uv->address);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}

static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	xpc_wakeup_channel_mgr(part);
}

static void
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long msgqueue_pa)
{
	ch->sn.uv.remote_notify_mq_gpa = msgqueue_pa;
}

static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}

static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}

static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}

static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;
		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}

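/*
 * Grab a free send-message slot for the channel, optionally blocking in
 * xpc_allocate_msg_wait() until one is released or the wait is interrupted.
 */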
static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
			 struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
	enum xp_retval ret;
	struct xpc_send_msg_slot_uv *msg_slot;
	struct xpc_fifo_entry_uv *entry;

	while (1) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
		if (entry != NULL)
			break;

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
	*address_of_msg_slot = msg_slot;
	return xpSuccess;
}

static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot)
{
	xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

	/* wakeup anyone waiting for a free msg slot */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);
}

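/*
 * Invoke the sender's notify callout exactly once: the cmpxchg() on
 * msg_slot->func guarantees that only one of the delivery-ACK path and the
 * disconnect path gets to make the call.
 */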
static void
xpc_notify_sender_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot,
		     enum xp_retval reason)
{
	xpc_notify_func func = msg_slot->func;

	if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {

		atomic_dec(&ch->n_to_notify);

		dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);

		func(reason, ch->partid, ch->number, msg_slot->key);

		dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);
	}
}

static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry = msg->hdr.msg_slot_number % ch->local_nentries;

	msg_slot = &ch->sn.uv.send_msg_slots[entry];

	BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
	msg_slot->msg_slot_number += ch->local_nentries;

	if (msg_slot->func != NULL)
		xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

	xpc_free_msg_slot_uv(ch, msg_slot);
}

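/*
 * Handle one message arriving on the notify mq: validate the channel, treat a
 * zero-size message as an ACK for a message we sent, otherwise copy the
 * payload into its reserved recv slot and queue it for delivery.
 */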
static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_channel *ch;
	struct xpc_channel_uv *ch_uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int ch_number = msg->hdr.ch_number;

	if (unlikely(ch_number >= part->nchannels)) {
		dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
			"channel number=0x%x in message from partid=%d\n",
			ch_number, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadChannelNumber;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		wake_up_interruptible(&xpc_activate_IRQ_wq);
		return;
	}

	ch = &part->channels[ch_number];
	xpc_msgqueue_ref(ch);

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/* see if we're really dealing with an ACK for a previously sent msg */
	if (msg->hdr.size == 0) {
		xpc_handle_notify_mq_ack_uv(ch, msg);
		xpc_msgqueue_deref(ch);
		return;
	}

	/* we're dealing with a normal message sent via the notify_mq */
	ch_uv = &ch->sn.uv;

	msg_slot = ch_uv->recv_msg_slots +
	    (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

	BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
	BUG_ON(msg_slot->hdr.size != 0);

	memcpy(msg_slot, msg, msg->hdr.size);

	xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

	if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
		/*
		 * If there is an existing idle kthread get it to deliver
		 * the payload, otherwise we'll have to get the channel mgr
		 * for this partition to create a kthread to do the delivery.
		 */
		if (atomic_read(&ch->kthreads_idle) > 0)
			wake_up_nr(&ch->idle_wq, 1);
		else
			xpc_send_chctl_local_msgrequest_uv(part, ch->number);
	}
	xpc_msgqueue_deref(ch);
}

static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_notify_mq_msg_uv *msg;
	short partid;
	struct xpc_partition *part;

	while ((msg = gru_get_next_message(xpc_notify_mq_uv->address)) !=
	       NULL) {

		partid = msg->hdr.partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
				"invalid partid=0x%x in message\n", partid);
		} else {
			part = &xpc_partitions[partid];

			if (xpc_part_ref(part)) {
				xpc_handle_notify_mq_msg_uv(part, msg);
				xpc_part_deref(part);
			}
		}

		gru_free_message(xpc_notify_mq_uv->address, msg);
	}

	return IRQ_HANDLED;
}

static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
	return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}

static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int ndeliverable_payloads;

	xpc_msgqueue_ref(ch);

	ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

	if (ndeliverable_payloads > 0 &&
	    (ch->flags & XPC_C_CONNECTED) &&
	    (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

		xpc_activate_kthreads(ch, ndeliverable_payloads);
	}

	xpc_msgqueue_deref(ch);
}

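/*
 * Send a user payload over the notify mq: reserve a send slot, record the
 * notify callout (func/key), build the message in a stack buffer and hand it
 * to xpc_send_gru_msg(); errors undo the slot reservation.
 */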
static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
		    u16 payload_size, u8 notify_type, xpc_notify_func func,
		    void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_send_msg_slot_uv *msg_slot = NULL;
	struct xpc_notify_mq_msg_uv *msg;
	u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
	size_t msg_size;

	DBUG_ON(notify_type != XPC_N_CALL);

	msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
	if (msg_size > ch->entry_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
	if (ret != xpSuccess)
		goto out_1;

	if (func != NULL) {
		atomic_inc(&ch->n_to_notify);

		msg_slot->key = key;
		smp_wmb(); /* a non-NULL func must hit memory after the key */
		msg_slot->func = func;

		if (ch->flags & XPC_C_DISCONNECTING) {
			ret = ch->reason;
			goto out_2;
		}
	}

	msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
	msg->hdr.partid = xp_partition_id;
	msg->hdr.ch_number = ch->number;
	msg->hdr.size = msg_size;
	msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
	memcpy(&msg->payload, payload, payload_size);

	ret = xpc_send_gru_msg(ch->sn.uv.remote_notify_mq_gpa, msg, msg_size);
	if (ret == xpSuccess)
		goto out_1;

	XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
	if (func != NULL) {
		/*
		 * Try to NULL the msg_slot's func field. If we fail, then
		 * xpc_notify_senders_of_disconnect_uv() beat us to it, in which
		 * case we need to pretend we succeeded to send the message
		 * since the user will get a callout for the disconnect error
		 * by xpc_notify_senders_of_disconnect_uv(), and to also get an
		 * error returned here will confuse them. Additionally, since
		 * in this case the channel is being disconnected we don't need
		 * to put the msg_slot back on the free list.
		 */
		if (cmpxchg(&msg_slot->func, func, NULL) != func) {
			ret = xpSuccess;
			goto out_1;
		}

		msg_slot->key = NULL;
		atomic_dec(&ch->n_to_notify);
	}
	xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}

/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry;

	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

	for (entry = 0; entry < ch->local_nentries; entry++) {

		if (atomic_read(&ch->n_to_notify) == 0)
			break;

		msg_slot = &ch->sn.uv.send_msg_slots[entry];
		if (msg_slot->func != NULL)
			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
	}
}

/*
 * Get the next deliverable message's payload.
 */
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
	struct xpc_fifo_entry_uv *entry;
	struct xpc_notify_mq_msg_uv *msg;
	void *payload = NULL;

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
		if (entry != NULL) {
			msg = container_of(entry, struct xpc_notify_mq_msg_uv,
					   hdr.u.next);
			payload = &msg->payload;
		}
	}
	return payload;
}

static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
	struct xpc_notify_mq_msg_uv *msg;
	enum xp_retval ret;

	msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);

	/* return an ACK to the sender of this message */

	msg->hdr.partid = xp_partition_id;
	msg->hdr.size = 0;	/* size of zero indicates this is an ACK */

	ret = xpc_send_gru_msg(ch->sn.uv.remote_notify_mq_gpa, msg,
			       sizeof(struct xpc_notify_mq_msghdr_uv));
	if (ret != xpSuccess)
		XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);

	msg->hdr.msg_slot_number += ch->remote_nentries;
}

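/*
 * Module entry points: xpc_init_uv() points the generic XPC function hooks at
 * the uv implementations above and creates the two GRU message queues;
 * xpc_exit_uv() destroys them again.
 */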
int
xpc_init_uv(void)
{
	xpc_setup_partitions_sn = xpc_setup_partitions_sn_uv;
	xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv;
	xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv;
	xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_uv;
	xpc_increment_heartbeat = xpc_increment_heartbeat_uv;
	xpc_offline_heartbeat = xpc_offline_heartbeat_uv;
	xpc_online_heartbeat = xpc_online_heartbeat_uv;
	xpc_heartbeat_init = xpc_heartbeat_init_uv;
	xpc_heartbeat_exit = xpc_heartbeat_exit_uv;
	xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_uv;

	xpc_request_partition_activation = xpc_request_partition_activation_uv;
	xpc_request_partition_reactivation =
	    xpc_request_partition_reactivation_uv;
	xpc_request_partition_deactivation =
	    xpc_request_partition_deactivation_uv;
	xpc_cancel_partition_deactivation_request =
	    xpc_cancel_partition_deactivation_request_uv;

	xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_uv;
	xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_uv;

	xpc_make_first_contact = xpc_make_first_contact_uv;

	xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_uv;
	xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_uv;
	xpc_send_chctl_closereply = xpc_send_chctl_closereply_uv;
	xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_uv;
	xpc_send_chctl_openreply = xpc_send_chctl_openreply_uv;

	xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv;

	xpc_setup_msg_structures = xpc_setup_msg_structures_uv;
	xpc_teardown_msg_structures = xpc_teardown_msg_structures_uv;

	xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_uv;
	xpc_indicate_partition_disengaged =
	    xpc_indicate_partition_disengaged_uv;
	xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_uv;
	xpc_partition_engaged = xpc_partition_engaged_uv;
	xpc_any_partition_engaged = xpc_any_partition_engaged_uv;

	xpc_n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv;
	xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv;
	xpc_send_payload = xpc_send_payload_uv;
	xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv;
	xpc_get_deliverable_payload = xpc_get_deliverable_payload_uv;
	xpc_received_payload = xpc_received_payload_uv;

	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
		dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
			XPC_MSG_HDR_MAX_SIZE);
		return -E2BIG;
	}

	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
						  XPC_ACTIVATE_IRQ_NAME,
						  xpc_handle_activate_IRQ_uv);
	if (IS_ERR(xpc_activate_mq_uv))
		return PTR_ERR(xpc_activate_mq_uv);

	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
						XPC_NOTIFY_IRQ_NAME,
						xpc_handle_notify_IRQ_uv);
	if (IS_ERR(xpc_notify_mq_uv)) {
		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
		return PTR_ERR(xpc_notify_mq_uv);
	}

	return 0;
}

void
xpc_exit_uv(void)
{
	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}