/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) uv-based functions.
 *
 *	Architecture specific implementation of common functions.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
	__u64	vector		:  8,
		delivery_mode	:  3,
		dest_mode	:  1,
		delivery_status	:  1,
		polarity	:  1,
		__reserved_1	:  1,
		trigger		:  1,
		mask		:  1,
		__reserved_2	: 15,
		dest		: 32;
};
#endif

static struct xpc_heartbeat_uv *xpc_heartbeat_uv;

#define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME		"xpc_activate"

#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV		(4 * XP_MAX_NPARTITIONS_UV * \
					 XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME		"xpc_notify"

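/*
 * Note: each message queue is sized to hold four messages per possible
 * partition (4 * XP_MAX_NPARTITIONS_UV), with an activate message occupying
 * one GRU cache line and a notify message two.
 */
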
static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;

static int
xpc_setup_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
		spin_lock_init(&part_uv->flags_lock);
		part_uv->remote_act_state = XPC_P_AS_INACTIVE;
	}
	return 0;
}

static void
xpc_teardown_partitions_uv(void)
{
	short partid;
	struct xpc_partition_uv *part_uv;
	unsigned long irq_flags;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;

		if (part_uv->cached_activate_gru_mq_desc != NULL) {
			mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			kfree(part_uv->cached_activate_gru_mq_desc);
			part_uv->cached_activate_gru_mq_desc = NULL;
			mutex_unlock(&part_uv->
				     cached_activate_gru_mq_desc_mutex);
		}
	}
}

static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
			       UV_AFFINITY_CPU);
	if (mq->irq < 0) {
		dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
			-mq->irq);
		return mq->irq;
	}

	mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_ACTIVATE;
	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
		mq->irq = SGI_XPC_NOTIFY;
	else
		return -EINVAL;

	mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
#else
	#error not a supported configuration
#endif

	return 0;
}

static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
	uv_teardown_irq(mq->irq);

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode;
	unsigned long mmr_value;

	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
	mmr_value = 1UL << 16;

	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
	#error not a supported configuration
#endif
}

static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;

#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

	ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
				    mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
			ret);
		return -EBUSY;
	}
#elif defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
					 mq->order, &mq->mmr_offset);
	if (ret < 0) {
		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
			"ret=%d\n", ret);
		return ret;
	}
#else
	#error not a supported configuration
#endif

	mq->watchlist_num = ret;
	return 0;
}

static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
	int ret;
	int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);

#if defined CONFIG_X86_64
	ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
	BUG_ON(ret != SALRET_OK);
#else
	#error not a supported configuration
#endif
}

static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
		     irq_handler_t irq_handler)
{
	enum xp_retval xp_ret;
	int ret;
	int nid;
	int pg_order;
	struct page *page;
	struct xpc_gru_mq_uv *mq;
	struct uv_IO_APIC_route_entry *mmr_value;

	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
	if (mq == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a xpc_gru_mq_uv structure\n");
		ret = -ENOMEM;
		goto out_0;
	}

	mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
				  GFP_KERNEL);
	if (mq->gru_mq_desc == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
			"a gru_message_queue_desc structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	pg_order = get_order(mq_size);
	mq->order = pg_order + PAGE_SHIFT;
	mq_size = 1UL << mq->order;

	mq->mmr_blade = uv_cpu_to_blade_id(cpu);

	nid = cpu_to_node(cpu);
	page = alloc_pages_exact_node(nid,
				      GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				      pg_order);
	if (page == NULL) {
		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
		ret = -ENOMEM;
		goto out_2;
	}
	mq->address = page_address(page);

	/* enable generation of irq when GRU mq operation occurs to this mq */
	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
	if (ret != 0)
		goto out_3;

	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
	if (ret != 0)
		goto out_4;

	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
	if (ret != 0) {
		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
			mq->irq, -ret);
		goto out_5;
	}

	mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
	ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
				       nid, mmr_value->vector, mmr_value->dest);
	if (ret != 0) {
		dev_err(xpc_part, "gru_create_message_queue() returned "
			"error=%d\n", ret);
		ret = -EINVAL;
		goto out_6;
	}

	/* allow other partitions to access this GRU mq */
	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
	if (xp_ret != xpSuccess) {
		ret = -EACCES;
		goto out_6;
	}

	return mq;

	/* something went wrong */
out_6:
	free_irq(mq->irq, NULL);
out_5:
	xpc_release_gru_mq_irq_uv(mq);
out_4:
	xpc_gru_mq_watchlist_free_uv(mq);
out_3:
	free_pages((unsigned long)mq->address, pg_order);
out_2:
	kfree(mq->gru_mq_desc);
out_1:
	kfree(mq);
out_0:
	return ERR_PTR(ret);
}

static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
	unsigned int mq_size;
	int pg_order;
	int ret;

	/* disallow other partitions to access GRU mq */
	mq_size = 1UL << mq->order;
	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
	BUG_ON(ret != xpSuccess);

	/* unregister irq handler and release mq irq/vector mapping */
	free_irq(mq->irq, NULL);
	xpc_release_gru_mq_irq_uv(mq);

	/* disable generation of irq when GRU mq op occurs to this mq */
	xpc_gru_mq_watchlist_free_uv(mq);

	pg_order = mq->order - PAGE_SHIFT;
	free_pages((unsigned long)mq->address, pg_order);

	kfree(mq->gru_mq_desc);
	kfree(mq);
}

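/*
 * Note: xpc_destroy_gru_mq_uv() unwinds xpc_create_gru_mq_uv() in reverse
 * order: revoke cross-partition access to the mq's memory, free the irq
 * and its MMR mapping, drop the GRU watchlist entry, then release the
 * pages and the remaining allocations.
 */
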
static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
		 size_t msg_size)
{
	enum xp_retval xp_ret;
	int ret;

	while (1) {
		ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
		if (ret == MQE_OK) {
			xp_ret = xpSuccess;
			break;
		}

		if (ret == MQE_QUEUE_FULL) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_QUEUE_FULL\n");
			/* !!! handle QLimit reached; delay & try again */
			/* ??? Do we add a limit to the number of retries? */
			(void)msleep_interruptible(10);
		} else if (ret == MQE_CONGESTION) {
			dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
				"error=MQE_CONGESTION\n");
			/* !!! handle LB Overflow; simply try again */
			/* ??? Do we add a limit to the number of retries? */
		} else {
			/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
			dev_err(xpc_chan, "gru_send_message_gpa() returned "
				"error=%d\n", ret);
			xp_ret = xpGruSendMqError;
			break;
		}
	}
	return xp_ret;
}

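/*
 * Note: xpc_send_gru_msg() retries indefinitely on MQE_QUEUE_FULL (after
 * sleeping ~10 ms) and on MQE_CONGESTION (immediately); any other error,
 * e.g. MQE_UNEXPECTED_CB_ERR, is surfaced to the caller as
 * xpGruSendMqError.
 */
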
static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}

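/*
 * Note: the loop above drops xpc_activate_IRQ_rcvd_lock around the calls
 * to xpc_activate_partition()/XPC_DEACTIVATE_PARTITION(), which may block,
 * and reacquires it before moving on to the next partition's request.
 */
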
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? creating a timer on the remote partition?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
		part_uv->heartbeat_gpa = msg->heartbeat_gpa;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
		struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_opencomplete_uv,
				   hdr);
		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}

	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}

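/*
 * Note: the trailing rp_ts_jiffies comparison above detects a remote
 * partition whose reserved page timestamp changed (i.e. it rebooted)
 * without a deactivate request ever reaching us, and requests a
 * reactivation in response.
 */
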
static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	short partid;
	struct xpc_partition *part;
	int wakeup_hb_checker = 0;
	int part_referenced;

	while (1) {
		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
		if (msg_hdr == NULL)
			break;

		partid = msg_hdr->partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
				"received invalid partid=0x%x in message\n",
				partid);
		} else {
			part = &xpc_partitions[partid];

			part_referenced = xpc_part_ref(part);
			xpc_handle_activate_mq_msg_uv(part, msg_hdr,
						      &wakeup_hb_checker);
			if (part_referenced)
				xpc_part_deref(part);
		}

		gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	return IRQ_HANDLED;
}

static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
				unsigned long gru_mq_desc_gpa)
{
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
			       sizeof(struct gru_message_queue_desc));
	if (ret == xpSuccess)
		gru_mq_desc->mq = NULL;

	return ret;
}

static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct gru_message_queue_desc *gru_mq_desc;
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = xp_partition_id;
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
	if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
		gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
		if (gru_mq_desc == NULL) {
			gru_mq_desc = kmalloc(sizeof(struct
					      gru_message_queue_desc),
					      GFP_KERNEL);
			if (gru_mq_desc == NULL) {
				ret = xpNoMemory;
				goto done;
			}
			part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
		}

		ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
						      part_uv->
						      activate_gru_mq_desc_gpa);
		if (ret != xpSuccess)
			goto done;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
	}

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
			       msg_size);
	if (ret != xpSuccess) {
		smp_rmb();	/* ensure a fresh copy of part_uv->flags */
		if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
			goto again;
	}
done:
	mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
	return ret;
}

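/*
 * Note: the remote activate mq descriptor is fetched once with
 * xpc_cache_remote_gru_mq_desc_uv() and then reused. If a send fails and
 * another thread has meanwhile cleared XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV
 * (e.g. because the remote partition re-registered its mq), the descriptor
 * is re-fetched and the send retried from the "again" label.
 */
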
static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
			      size_t msg_size, int msg_type)
{
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess))
		XPC_DEACTIVATE_PARTITION(part, ret);
}

static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
			    void *msg, size_t msg_size, int msg_type)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	enum xp_retval ret;

	ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
	if (unlikely(ret != xpSuccess)) {
		if (irq_flags != NULL)
			spin_unlock_irqrestore(&ch->lock, *irq_flags);

		XPC_DEACTIVATE_PARTITION(part, ret);

		if (irq_flags != NULL)
			spin_lock_irqsave(&ch->lock, *irq_flags);
	}
}

static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;

	/*
	 * !!! Make our side think that the remote partition sent an activate
	 * !!! mq message our way by doing what the activate IRQ handler would
	 * !!! do had one really been sent.
	 */

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	if (part_uv->act_state_req == 0)
		xpc_activate_IRQ_rcvd++;
	part_uv->act_state_req = act_state_req;
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

	wake_up_interruptible(&xpc_activate_IRQ_wq);
}

static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
				  size_t *len)
{
	s64 status;
	enum xp_retval ret;

#if defined CONFIG_X86_64
	status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
					  (u64 *)len);
	if (status == BIOS_STATUS_SUCCESS)
		ret = xpSuccess;
	else if (status == BIOS_STATUS_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpBiosError;

#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
	if (status == SALRET_OK)
		ret = xpSuccess;
	else if (status == SALRET_MORE_PASSES)
		ret = xpNeedMoreInfo;
	else
		ret = xpSalError;

#else
	#error not a supported configuration
#endif

	return ret;
}

static int
xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
	xpc_heartbeat_uv =
	    &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
	rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
	rp->sn.uv.activate_gru_mq_desc_gpa =
	    uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
	return 0;
}

static void
xpc_allow_hb_uv(short partid)
{
}

static void
xpc_disallow_hb_uv(short partid)
{
}

static void
xpc_disallow_all_hbs_uv(void)
{
}

static void
xpc_increment_heartbeat_uv(void)
{
	xpc_heartbeat_uv->value++;
}

static void
xpc_offline_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 1;
}

static void
xpc_online_heartbeat_uv(void)
{
	xpc_increment_heartbeat_uv();
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_init_uv(void)
{
	xpc_heartbeat_uv->value = 1;
	xpc_heartbeat_uv->offline = 0;
}

static void
xpc_heartbeat_exit_uv(void)
{
	xpc_offline_heartbeat_uv();
}

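/*
 * Note: UV heartbeating is pull-based: each partition increments its own
 * xpc_heartbeat_uv->value and remote partitions periodically copy that
 * structure over with xp_remote_memcpy() (see
 * xpc_get_remote_heartbeat_uv() below); no interrupt is involved.
 */
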
static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	enum xp_retval ret;

	ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
			       part_uv->heartbeat_gpa,
			       sizeof(struct xpc_heartbeat_uv));
	if (ret != xpSuccess)
		return ret;

	if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
	    !part_uv->cached_heartbeat.offline) {

		ret = xpNoHeartbeat;
	} else {
		part->last_heartbeat = part_uv->cached_heartbeat.value;
	}
	return ret;
}

static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
				    unsigned long remote_rp_gpa, int nasid)
{
	short partid = remote_rp->SAL_partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_activate_mq_msg_activate_req_uv msg;

	part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
	part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
	part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
	part->sn.uv.activate_gru_mq_desc_gpa =
	    remote_rp->sn.uv.activate_gru_mq_desc_gpa;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
		msg.rp_gpa = uv_gpa(xpc_rsvd_page);
		msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
		msg.activate_gru_mq_desc_gpa =
		    xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					      XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
	}

	if (part->act_state == XPC_P_AS_INACTIVE)
		xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
	xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}

static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_deactivate_req_uv msg;

	/*
	 * ??? Is it a good idea to make this conditional on what is
	 * ??? potentially stale state information?
	 */
	if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
	    part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {

		msg.reason = part->reason;
		xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
					      XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
	}
}

static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
	head->first = NULL;
	head->last = NULL;
	spin_lock_init(&head->lock);
	head->n_entries = 0;
}

static struct xpc_fifo_entry_uv *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
	unsigned long irq_flags;
	struct xpc_fifo_entry_uv *first;

	spin_lock_irqsave(&head->lock, irq_flags);
	first = head->first;
	if (head->first != NULL) {
		head->first = first->next;
		if (head->first == NULL)
			head->last = NULL;

		head->n_entries--;
		BUG_ON(head->n_entries < 0);

		first->next = NULL;
	}
	spin_unlock_irqrestore(&head->lock, irq_flags);
	return first;
}

static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
		      struct xpc_fifo_entry_uv *last)
{
	unsigned long irq_flags;

	last->next = NULL;
	spin_lock_irqsave(&head->lock, irq_flags);
	if (head->last != NULL)
		head->last->next = last;
	else
		head->first = last;
	head->last = last;
	head->n_entries++;
	spin_unlock_irqrestore(&head->lock, irq_flags);
}

static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
	return head->n_entries;
}

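/*
 * Note: these helpers implement a simple spinlock-protected, singly-linked
 * intrusive fifo. Users embed a struct xpc_fifo_entry_uv in their own
 * structure and recover the container with container_of(), as
 * xpc_allocate_msg_slot_uv() does with msg_slot->next.
 */
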
/*
 * Setup the channel structures that are uv specific.
 */
static enum xp_retval
xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
	struct xpc_channel_uv *ch_uv;
	int ch_number;

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch_uv = &part->channels[ch_number].sn.uv;

		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
	}

	return xpSuccess;
}

/*
 * Teardown the channel structures that are uv specific.
 */
static void
xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
	/* nothing needs to be done */
	return;
}

static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	/*
	 * We send a sync msg to get the remote partition's remote_act_state
	 * updated to our current act_state which at this point should
	 * be XPC_P_AS_ACTIVATING.
	 */
	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);

	while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) {

		dev_dbg(xpc_part, "waiting to make first contact with "
			"partition %d\n", XPC_PARTID(part));

		/* wait a 1/4 of a second or so */
		(void)msleep_interruptible(250);

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			return part->reason;
	}

	return xpSuccess;
}

static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	chctl = part->chctl;
	if (chctl.all_flags != 0)
		part->chctl.all_flags = 0;

	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
	return chctl.all_flags;
}

static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_send_msg_slot_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->send_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = &ch_uv->send_msg_slots[entry];

			msg_slot->msg_slot_number = entry;
			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
					      &msg_slot->next);
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->local_nentries)
			ch->local_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int nentries;
	int entry;
	size_t nbytes;

	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
		nbytes = nentries * ch->entry_size;
		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
		if (ch_uv->recv_msg_slots == NULL)
			continue;

		for (entry = 0; entry < nentries; entry++) {
			msg_slot = ch_uv->recv_msg_slots +
			    entry * ch->entry_size;

			msg_slot->hdr.msg_slot_number = entry;
		}

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (nentries < ch->remote_nentries)
			ch->remote_nentries = nentries;
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return xpSuccess;
	}

	return xpNoMemory;
}

/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	static enum xp_retval ret;
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
						   gru_message_queue_desc),
						   GFP_KERNEL);
	if (ch_uv->cached_notify_gru_mq_desc == NULL)
		return xpNoMemory;

	ret = xpc_allocate_send_msg_slot_uv(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_recv_msg_slot_uv(ch);
		if (ret != xpSuccess) {
			kfree(ch_uv->send_msg_slots);
			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		}
	}
	return ret;
}

/*
 * Free up msg_slots and clear other stuff that were setup for the specified
 * channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(!spin_is_locked(&ch->lock));

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}

static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closerequest_uv msg;

	msg.ch_number = ch->number;
	msg.reason = ch->reason;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}

static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_closereply_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}

static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;

	msg.ch_number = ch->number;
	msg.entry_size = ch->entry_size;
	msg.local_nentries = ch->local_nentries;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}

static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_openreply_uv msg;

	msg.ch_number = ch->number;
	msg.local_nentries = ch->local_nentries;
	msg.remote_nentries = ch->remote_nentries;
	msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}

static void
xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;

	msg.ch_number = ch->number;
	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
}

static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	xpc_wakeup_channel_mgr(part);
}

static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long gru_mq_desc_gpa)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
	return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
					       gru_mq_desc_gpa);
}

static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}

static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
	struct xpc_activate_mq_msg_uv msg;

	xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
				      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}

static void
xpc_assume_partition_disengaged_uv(short partid)
{
	struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
	unsigned long irq_flags;

	spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
	part_uv->flags &= ~XPC_P_ENGAGED_UV;
	spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}

static int
xpc_partition_engaged_uv(short partid)
{
	return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}

static int
xpc_any_partition_engaged_uv(void)
{
	struct xpc_partition_uv *part_uv;
	short partid;

	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part_uv = &xpc_partitions[partid].sn.uv;
		if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
			return 1;
	}
	return 0;
}

static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
			 struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
	enum xp_retval ret;
	struct xpc_send_msg_slot_uv *msg_slot;
	struct xpc_fifo_entry_uv *entry;

	while (1) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
		if (entry != NULL)
			break;

		if (flags & XPC_NOWAIT)
			return xpNoWait;

		ret = xpc_allocate_msg_wait(ch);
		if (ret != xpInterrupted && ret != xpTimeout)
			return ret;
	}

	msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
	*address_of_msg_slot = msg_slot;
	return xpSuccess;
}

static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot)
{
	xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);

	/* wakeup anyone waiting for a free msg slot */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);
}

static void
xpc_notify_sender_uv(struct xpc_channel *ch,
		     struct xpc_send_msg_slot_uv *msg_slot,
		     enum xp_retval reason)
{
	xpc_notify_func func = msg_slot->func;

	if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {

		atomic_dec(&ch->n_to_notify);

		dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);

		func(reason, ch->partid, ch->number, msg_slot->key);

		dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
			msg_slot->msg_slot_number, ch->partid, ch->number);
	}
}

static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry = msg->hdr.msg_slot_number % ch->local_nentries;

	msg_slot = &ch->sn.uv.send_msg_slots[entry];

	BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
	msg_slot->msg_slot_number += ch->local_nentries;

	if (msg_slot->func != NULL)
		xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

	xpc_free_msg_slot_uv(ch, msg_slot);
}

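/*
 * Note: bumping msg_slot->msg_slot_number by ch->local_nentries on each
 * ACK effectively turns the slot number into a generation counter, so an
 * ACK for an earlier use of the same slot would trip the BUG_ON() above
 * rather than silently completing the wrong message.
 */
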
static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
			    struct xpc_notify_mq_msg_uv *msg)
{
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_channel *ch;
	struct xpc_channel_uv *ch_uv;
	struct xpc_notify_mq_msg_uv *msg_slot;
	unsigned long irq_flags;
	int ch_number = msg->hdr.ch_number;

	if (unlikely(ch_number >= part->nchannels)) {
		dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
			"channel number=0x%x in message from partid=%d\n",
			ch_number, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadChannelNumber;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		wake_up_interruptible(&xpc_activate_IRQ_wq);
		return;
	}

	ch = &part->channels[ch_number];
	xpc_msgqueue_ref(ch);

	if (!(ch->flags & XPC_C_CONNECTED)) {
		xpc_msgqueue_deref(ch);
		return;
	}

	/* see if we're really dealing with an ACK for a previously sent msg */
	if (msg->hdr.size == 0) {
		xpc_handle_notify_mq_ack_uv(ch, msg);
		xpc_msgqueue_deref(ch);
		return;
	}

	/* we're dealing with a normal message sent via the notify_mq */
	ch_uv = &ch->sn.uv;

	msg_slot = ch_uv->recv_msg_slots +
	    (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

	BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
	BUG_ON(msg_slot->hdr.size != 0);

	memcpy(msg_slot, msg, msg->hdr.size);

	xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

	if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
		/*
		 * If there is an existing idle kthread get it to deliver
		 * the payload, otherwise we'll have to get the channel mgr
		 * for this partition to create a kthread to do the delivery.
		 */
		if (atomic_read(&ch->kthreads_idle) > 0)
			wake_up_nr(&ch->idle_wq, 1);
		else
			xpc_send_chctl_local_msgrequest_uv(part, ch->number);
	}
	xpc_msgqueue_deref(ch);
}

static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
	struct xpc_notify_mq_msg_uv *msg;
	short partid;
	struct xpc_partition *part;

	while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
	       NULL) {

		partid = msg->hdr.partid;
		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
			dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
				"invalid partid=0x%x in message\n", partid);
		} else {
			part = &xpc_partitions[partid];

			if (xpc_part_ref(part)) {
				xpc_handle_notify_mq_msg_uv(part, msg);
				xpc_part_deref(part);
			}
		}

		gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
	}

	return IRQ_HANDLED;
}

static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
	return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}

static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
	struct xpc_channel *ch = &part->channels[ch_number];
	int ndeliverable_payloads;

	xpc_msgqueue_ref(ch);

	ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

	if (ndeliverable_payloads > 0 &&
	    (ch->flags & XPC_C_CONNECTED) &&
	    (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

		xpc_activate_kthreads(ch, ndeliverable_payloads);
	}

	xpc_msgqueue_deref(ch);
}

static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
		    u16 payload_size, u8 notify_type, xpc_notify_func func,
		    void *key)
{
	enum xp_retval ret = xpSuccess;
	struct xpc_send_msg_slot_uv *msg_slot = NULL;
	struct xpc_notify_mq_msg_uv *msg;
	u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
	size_t msg_size;

	DBUG_ON(notify_type != XPC_N_CALL);

	msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
	if (msg_size > ch->entry_size)
		return xpPayloadTooBig;

	xpc_msgqueue_ref(ch);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		goto out_1;
	}
	if (!(ch->flags & XPC_C_CONNECTED)) {
		ret = xpNotConnected;
		goto out_1;
	}

	ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
	if (ret != xpSuccess)
		goto out_1;

	if (func != NULL) {
		atomic_inc(&ch->n_to_notify);

		msg_slot->key = key;
		smp_wmb(); /* a non-NULL func must hit memory after the key */
		msg_slot->func = func;

		if (ch->flags & XPC_C_DISCONNECTING) {
			ret = ch->reason;
			goto out_2;
		}
	}

	msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
	msg->hdr.partid = xp_partition_id;
	msg->hdr.ch_number = ch->number;
	msg->hdr.size = msg_size;
	msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
	memcpy(&msg->payload, payload, payload_size);

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       msg_size);
	if (ret == xpSuccess)
		goto out_1;

	XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
	if (func != NULL) {
		/*
		 * Try to NULL the msg_slot's func field. If we fail, then
		 * xpc_notify_senders_of_disconnect_uv() beat us to it, in which
		 * case we need to pretend we succeeded to send the message
		 * since the user will get a callout for the disconnect error
		 * by xpc_notify_senders_of_disconnect_uv(), and returning an
		 * error here as well would confuse them. Additionally, since
		 * in this case the channel is being disconnected we don't need
		 * to put the msg_slot back on the free list.
		 */
		if (cmpxchg(&msg_slot->func, func, NULL) != func) {
			ret = xpSuccess;
			goto out_1;
		}

		msg_slot->key = NULL;
		atomic_dec(&ch->n_to_notify);
	}
	xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
	xpc_msgqueue_deref(ch);
	return ret;
}

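/*
 * Usage sketch (illustrative, not part of this file): a client such as
 * xpnet reaches xpc_send_payload_uv() through the xp layer, roughly:
 *
 *	ret = xpc_send_notify(dest_partid, ch_number, XPC_NOWAIT, payload,
 *			      payload_size, my_notify_func, my_key);
 *
 * which dispatches here via xpc_arch_ops.send_payload. The names
 * dest_partid, my_notify_func and my_key are hypothetical.
 */
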
/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry;

	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

	for (entry = 0; entry < ch->local_nentries; entry++) {

		if (atomic_read(&ch->n_to_notify) == 0)
			break;

		msg_slot = &ch->sn.uv.send_msg_slots[entry];
		if (msg_slot->func != NULL)
			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
	}
}

/*
 * Get the next deliverable message's payload.
 */
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
	struct xpc_fifo_entry_uv *entry;
	struct xpc_notify_mq_msg_uv *msg;
	void *payload = NULL;

	if (!(ch->flags & XPC_C_DISCONNECTING)) {
		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
		if (entry != NULL) {
			msg = container_of(entry, struct xpc_notify_mq_msg_uv,
					   hdr.u.next);
			payload = &msg->payload;
		}
	}
	return payload;
}

static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
	struct xpc_notify_mq_msg_uv *msg;
	enum xp_retval ret;

	msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);

	/* return an ACK to the sender of this message */

	msg->hdr.partid = xp_partition_id;
	msg->hdr.size = 0;	/* size of zero indicates this is an ACK */

	ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
			       sizeof(struct xpc_notify_mq_msghdr_uv));
	if (ret != xpSuccess)
		XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);

	msg->hdr.msg_slot_number += ch->remote_nentries;
}

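/*
 * Note: the ACK reuses the received message's header with hdr.size set to
 * zero; on the original sender's side xpc_handle_notify_mq_msg_uv()
 * treats any zero-size message as an ACK and hands it to
 * xpc_handle_notify_mq_ack_uv().
 */
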
static struct xpc_arch_operations xpc_arch_ops_uv = {
	.setup_partitions = xpc_setup_partitions_uv,
	.teardown_partitions = xpc_teardown_partitions_uv,
	.process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
	.get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
	.setup_rsvd_page = xpc_setup_rsvd_page_uv,

	.allow_hb = xpc_allow_hb_uv,
	.disallow_hb = xpc_disallow_hb_uv,
	.disallow_all_hbs = xpc_disallow_all_hbs_uv,
	.increment_heartbeat = xpc_increment_heartbeat_uv,
	.offline_heartbeat = xpc_offline_heartbeat_uv,
	.online_heartbeat = xpc_online_heartbeat_uv,
	.heartbeat_init = xpc_heartbeat_init_uv,
	.heartbeat_exit = xpc_heartbeat_exit_uv,
	.get_remote_heartbeat = xpc_get_remote_heartbeat_uv,

	.request_partition_activation =
		xpc_request_partition_activation_uv,
	.request_partition_reactivation =
		xpc_request_partition_reactivation_uv,
	.request_partition_deactivation =
		xpc_request_partition_deactivation_uv,
	.cancel_partition_deactivation_request =
		xpc_cancel_partition_deactivation_request_uv,

	.setup_ch_structures = xpc_setup_ch_structures_uv,
	.teardown_ch_structures = xpc_teardown_ch_structures_uv,

	.make_first_contact = xpc_make_first_contact_uv,

	.get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
	.send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
	.send_chctl_closereply = xpc_send_chctl_closereply_uv,
	.send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
	.send_chctl_openreply = xpc_send_chctl_openreply_uv,
	.send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
	.process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,

	.save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,

	.setup_msg_structures = xpc_setup_msg_structures_uv,
	.teardown_msg_structures = xpc_teardown_msg_structures_uv,

	.indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
	.indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
	.assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
	.partition_engaged = xpc_partition_engaged_uv,
	.any_partition_engaged = xpc_any_partition_engaged_uv,

	.n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
	.send_payload = xpc_send_payload_uv,
	.get_deliverable_payload = xpc_get_deliverable_payload_uv,
	.received_payload = xpc_received_payload_uv,
	.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};

int
xpc_init_uv(void)
{
	xpc_arch_ops = xpc_arch_ops_uv;

	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
		dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
			XPC_MSG_HDR_MAX_SIZE);
		return -E2BIG;
	}

	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
						  XPC_ACTIVATE_IRQ_NAME,
						  xpc_handle_activate_IRQ_uv);
	if (IS_ERR(xpc_activate_mq_uv))
		return PTR_ERR(xpc_activate_mq_uv);

	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
						XPC_NOTIFY_IRQ_NAME,
						xpc_handle_notify_IRQ_uv);
	if (IS_ERR(xpc_notify_mq_uv)) {
		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
		return PTR_ERR(xpc_notify_mq_uv);
	}

	return 0;
}

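/*
 * Note: xpc_init_uv() installs the UV ops table before creating the two
 * GRU message queues, and tears the activate mq back down if creation of
 * the notify mq fails; xpc_exit_uv() below destroys both in reverse
 * order of creation.
 */
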
void
xpc_exit_uv(void)
{
	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}