/*
 * Hyper-V guest/hypervisor interaction
 *
 * Copyright (c) 2015-2018 Virtuozzo International GmbH.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
10 #include "qemu/osdep.h"
11 #include "qemu/main-loop.h"
12 #include "qemu/module.h"
13 #include "qapi/error.h"
14 #include "exec/address-spaces.h"
15 #include "sysemu/kvm.h"
16 #include "qemu/bitops.h"
17 #include "qemu/error-report.h"
18 #include "qemu/lockable.h"
19 #include "qemu/queue.h"
21 #include "qemu/rcu_queue.h"
22 #include "hw/hyperv/hyperv.h"
23 #include "qom/object.h"
26 DeviceState parent_obj
;
32 hwaddr event_page_addr
;
33 MemoryRegion msg_page_mr
;
34 MemoryRegion event_page_mr
;
35 struct hyperv_message_page
*msg_page
;
36 struct hyperv_event_flags_page
*event_page
;
38 QemuMutex sint_routes_mutex
;
39 QLIST_HEAD(, HvSintRoute
) sint_routes
;
42 #define TYPE_SYNIC "hyperv-synic"
43 OBJECT_DECLARE_SIMPLE_TYPE(SynICState
, SYNIC
)
45 static bool synic_enabled
;
47 bool hyperv_is_synic_enabled(void)
52 static SynICState
*get_synic(CPUState
*cs
)
54 return SYNIC(object_resolve_path_component(OBJECT(cs
), "synic"));
57 static void synic_update(SynICState
*synic
, bool sctl_enable
,
58 hwaddr msg_page_addr
, hwaddr event_page_addr
)
61 synic
->sctl_enabled
= sctl_enable
;
62 if (synic
->msg_page_addr
!= msg_page_addr
) {
63 if (synic
->msg_page_addr
) {
64 memory_region_del_subregion(get_system_memory(),
68 memory_region_add_subregion(get_system_memory(), msg_page_addr
,
71 synic
->msg_page_addr
= msg_page_addr
;
73 if (synic
->event_page_addr
!= event_page_addr
) {
74 if (synic
->event_page_addr
) {
75 memory_region_del_subregion(get_system_memory(),
76 &synic
->event_page_mr
);
78 if (event_page_addr
) {
79 memory_region_add_subregion(get_system_memory(), event_page_addr
,
80 &synic
->event_page_mr
);
82 synic
->event_page_addr
= event_page_addr
;
86 void hyperv_synic_update(CPUState
*cs
, bool sctl_enable
,
87 hwaddr msg_page_addr
, hwaddr event_page_addr
)
89 SynICState
*synic
= get_synic(cs
);
95 synic_update(synic
, sctl_enable
, msg_page_addr
, event_page_addr
);
98 static void synic_realize(DeviceState
*dev
, Error
**errp
)
100 Object
*obj
= OBJECT(dev
);
101 SynICState
*synic
= SYNIC(dev
);
102 char *msgp_name
, *eventp_name
;
105 /* memory region names have to be globally unique */
106 vp_index
= hyperv_vp_index(synic
->cs
);
107 msgp_name
= g_strdup_printf("synic-%u-msg-page", vp_index
);
108 eventp_name
= g_strdup_printf("synic-%u-event-page", vp_index
);
110 memory_region_init_ram(&synic
->msg_page_mr
, obj
, msgp_name
,
111 sizeof(*synic
->msg_page
), &error_abort
);
112 memory_region_init_ram(&synic
->event_page_mr
, obj
, eventp_name
,
113 sizeof(*synic
->event_page
), &error_abort
);
114 synic
->msg_page
= memory_region_get_ram_ptr(&synic
->msg_page_mr
);
115 synic
->event_page
= memory_region_get_ram_ptr(&synic
->event_page_mr
);
116 qemu_mutex_init(&synic
->sint_routes_mutex
);
117 QLIST_INIT(&synic
->sint_routes
);
123 static void synic_reset(DeviceState
*dev
)
125 SynICState
*synic
= SYNIC(dev
);
126 memset(synic
->msg_page
, 0, sizeof(*synic
->msg_page
));
127 memset(synic
->event_page
, 0, sizeof(*synic
->event_page
));
128 synic_update(synic
, false, 0, 0);
129 assert(QLIST_EMPTY(&synic
->sint_routes
));
132 static void synic_class_init(ObjectClass
*klass
, void *data
)
134 DeviceClass
*dc
= DEVICE_CLASS(klass
);
136 dc
->realize
= synic_realize
;
137 dc
->reset
= synic_reset
;
138 dc
->user_creatable
= false;
141 void hyperv_synic_add(CPUState
*cs
)
146 obj
= object_new(TYPE_SYNIC
);
149 object_property_add_child(OBJECT(cs
), "synic", obj
);
151 qdev_realize(DEVICE(obj
), NULL
, &error_abort
);
152 synic_enabled
= true;
155 void hyperv_synic_reset(CPUState
*cs
)
157 SynICState
*synic
= get_synic(cs
);
160 device_legacy_reset(DEVICE(synic
));
164 static const TypeInfo synic_type_info
= {
166 .parent
= TYPE_DEVICE
,
167 .instance_size
= sizeof(SynICState
),
168 .class_init
= synic_class_init
,
171 static void synic_register_types(void)
173 type_register_static(&synic_type_info
);
176 type_init(synic_register_types
)
179 * KVM has its own message producers (SynIC timers). To guarantee
180 * serialization with both KVM vcpu and the guest cpu, the messages are first
181 * staged in an intermediate area and then posted to the SynIC message page in
184 typedef struct HvSintStagedMessage
{
185 /* message content staged by hyperv_post_msg */
186 struct hyperv_message msg
;
187 /* callback + data (r/o) to complete the processing in a BH */
190 /* message posting status filled by cpu_post_msg */
192 /* passing the buck: */
197 * hyperv_post_msg (e.g. in main loop) grabs the staged area (FREE ->
198 * BUSY), copies msg, and schedules cpu_post_msg on the assigned cpu
202 * cpu_post_msg (vcpu thread) tries to copy staged msg to msg slot,
203 * notify the guest, records the status, marks the posting done (BUSY
204 * -> POSTED), and schedules sint_msg_bh BH
206 HV_STAGED_MSG_POSTED
,
208 * sint_msg_bh (BH) verifies that the posting is done, runs the
209 * callback, and starts over (POSTED -> FREE)
212 } HvSintStagedMessage
;
218 EventNotifier sint_set_notifier
;
219 EventNotifier sint_ack_notifier
;
221 HvSintStagedMessage
*staged_msg
;
224 QLIST_ENTRY(HvSintRoute
) link
;
227 static CPUState
*hyperv_find_vcpu(uint32_t vp_index
)
229 CPUState
*cs
= qemu_get_cpu(vp_index
);
230 assert(hyperv_vp_index(cs
) == vp_index
);
235 * BH to complete the processing of a staged message.
237 static void sint_msg_bh(void *opaque
)
239 HvSintRoute
*sint_route
= opaque
;
240 HvSintStagedMessage
*staged_msg
= sint_route
->staged_msg
;
242 if (qatomic_read(&staged_msg
->state
) != HV_STAGED_MSG_POSTED
) {
243 /* status nor ready yet (spurious ack from guest?), ignore */
247 staged_msg
->cb(staged_msg
->cb_data
, staged_msg
->status
);
248 staged_msg
->status
= 0;
250 /* staged message processing finished, ready to start over */
251 qatomic_set(&staged_msg
->state
, HV_STAGED_MSG_FREE
);
252 /* drop the reference taken in hyperv_post_msg */
253 hyperv_sint_route_unref(sint_route
);
257 * Worker to transfer the message from the staging area into the SynIC message
258 * page in vcpu context.
260 static void cpu_post_msg(CPUState
*cs
, run_on_cpu_data data
)
262 HvSintRoute
*sint_route
= data
.host_ptr
;
263 HvSintStagedMessage
*staged_msg
= sint_route
->staged_msg
;
264 SynICState
*synic
= sint_route
->synic
;
265 struct hyperv_message
*dst_msg
;
266 bool wait_for_sint_ack
= false;
268 assert(staged_msg
->state
== HV_STAGED_MSG_BUSY
);
270 if (!synic
->msg_page_addr
) {
271 staged_msg
->status
= -ENXIO
;
275 dst_msg
= &synic
->msg_page
->slot
[sint_route
->sint
];
277 if (dst_msg
->header
.message_type
!= HV_MESSAGE_NONE
) {
278 dst_msg
->header
.message_flags
|= HV_MESSAGE_FLAG_PENDING
;
279 staged_msg
->status
= -EAGAIN
;
280 wait_for_sint_ack
= true;
282 memcpy(dst_msg
, &staged_msg
->msg
, sizeof(*dst_msg
));
283 staged_msg
->status
= hyperv_sint_route_set_sint(sint_route
);
286 memory_region_set_dirty(&synic
->msg_page_mr
, 0, sizeof(*synic
->msg_page
));
289 qatomic_set(&staged_msg
->state
, HV_STAGED_MSG_POSTED
);
291 * Notify the msg originator of the progress made; if the slot was busy we
292 * set msg_pending flag in it so it will be the guest who will do EOM and
293 * trigger the notification from KVM via sint_ack_notifier
295 if (!wait_for_sint_ack
) {
296 aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh
,
302 * Post a Hyper-V message to the staging area, for delivery to guest in the
305 int hyperv_post_msg(HvSintRoute
*sint_route
, struct hyperv_message
*src_msg
)
307 HvSintStagedMessage
*staged_msg
= sint_route
->staged_msg
;
311 /* grab the staging area */
312 if (qatomic_cmpxchg(&staged_msg
->state
, HV_STAGED_MSG_FREE
,
313 HV_STAGED_MSG_BUSY
) != HV_STAGED_MSG_FREE
) {
317 memcpy(&staged_msg
->msg
, src_msg
, sizeof(*src_msg
));
319 /* hold a reference on sint_route until the callback is finished */
320 hyperv_sint_route_ref(sint_route
);
322 /* schedule message posting attempt in vcpu thread */
323 async_run_on_cpu(sint_route
->synic
->cs
, cpu_post_msg
,
324 RUN_ON_CPU_HOST_PTR(sint_route
));
328 static void sint_ack_handler(EventNotifier
*notifier
)
330 HvSintRoute
*sint_route
= container_of(notifier
, HvSintRoute
,
332 event_notifier_test_and_clear(notifier
);
335 * the guest consumed the previous message so complete the current one with
336 * -EAGAIN and let the msg originator retry
338 aio_bh_schedule_oneshot(qemu_get_aio_context(), sint_msg_bh
, sint_route
);
342 * Set given event flag for a given sint on a given vcpu, and signal the sint.
344 int hyperv_set_event_flag(HvSintRoute
*sint_route
, unsigned eventno
)
347 SynICState
*synic
= sint_route
->synic
;
348 unsigned long *flags
, set_mask
;
351 if (eventno
> HV_EVENT_FLAGS_COUNT
) {
354 if (!synic
->sctl_enabled
|| !synic
->event_page_addr
) {
358 set_idx
= BIT_WORD(eventno
);
359 set_mask
= BIT_MASK(eventno
);
360 flags
= synic
->event_page
->slot
[sint_route
->sint
].flags
;
362 if ((qatomic_fetch_or(&flags
[set_idx
], set_mask
) & set_mask
) != set_mask
) {
363 memory_region_set_dirty(&synic
->event_page_mr
, 0,
364 sizeof(*synic
->event_page
));
365 ret
= hyperv_sint_route_set_sint(sint_route
);
372 HvSintRoute
*hyperv_sint_route_new(uint32_t vp_index
, uint32_t sint
,
373 HvSintMsgCb cb
, void *cb_data
)
375 HvSintRoute
*sint_route
= NULL
;
376 EventNotifier
*ack_notifier
= NULL
;
380 bool ack_event_initialized
= false;
382 cs
= hyperv_find_vcpu(vp_index
);
387 synic
= get_synic(cs
);
392 sint_route
= g_new0(HvSintRoute
, 1);
397 sint_route
->synic
= synic
;
398 sint_route
->sint
= sint
;
399 sint_route
->refcount
= 1;
401 ack_notifier
= cb
? &sint_route
->sint_ack_notifier
: NULL
;
403 sint_route
->staged_msg
= g_new0(HvSintStagedMessage
, 1);
404 if (!sint_route
->staged_msg
) {
405 goto cleanup_err_sint
;
407 sint_route
->staged_msg
->cb
= cb
;
408 sint_route
->staged_msg
->cb_data
= cb_data
;
410 r
= event_notifier_init(ack_notifier
, false);
412 goto cleanup_err_sint
;
414 event_notifier_set_handler(ack_notifier
, sint_ack_handler
);
415 ack_event_initialized
= true;
418 /* See if we are done or we need to setup a GSI for this SintRoute */
419 if (!synic
->sctl_enabled
) {
423 /* We need to setup a GSI for this SintRoute */
424 r
= event_notifier_init(&sint_route
->sint_set_notifier
, false);
426 goto cleanup_err_sint
;
429 gsi
= kvm_irqchip_add_hv_sint_route(kvm_state
, vp_index
, sint
);
431 goto cleanup_err_sint_notifier
;
434 r
= kvm_irqchip_add_irqfd_notifier_gsi(kvm_state
,
435 &sint_route
->sint_set_notifier
,
438 goto cleanup_err_irqfd
;
440 sint_route
->gsi
= gsi
;
442 qemu_mutex_lock(&synic
->sint_routes_mutex
);
443 QLIST_INSERT_HEAD(&synic
->sint_routes
, sint_route
, link
);
444 qemu_mutex_unlock(&synic
->sint_routes_mutex
);
448 kvm_irqchip_release_virq(kvm_state
, gsi
);
450 cleanup_err_sint_notifier
:
451 event_notifier_cleanup(&sint_route
->sint_set_notifier
);
455 if (ack_event_initialized
) {
456 event_notifier_set_handler(ack_notifier
, NULL
);
457 event_notifier_cleanup(ack_notifier
);
460 g_free(sint_route
->staged_msg
);
467 void hyperv_sint_route_ref(HvSintRoute
*sint_route
)
469 sint_route
->refcount
++;
472 void hyperv_sint_route_unref(HvSintRoute
*sint_route
)
480 assert(sint_route
->refcount
> 0);
482 if (--sint_route
->refcount
) {
486 synic
= sint_route
->synic
;
487 qemu_mutex_lock(&synic
->sint_routes_mutex
);
488 QLIST_REMOVE(sint_route
, link
);
489 qemu_mutex_unlock(&synic
->sint_routes_mutex
);
491 if (sint_route
->gsi
) {
492 kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state
,
493 &sint_route
->sint_set_notifier
,
495 kvm_irqchip_release_virq(kvm_state
, sint_route
->gsi
);
496 event_notifier_cleanup(&sint_route
->sint_set_notifier
);
499 if (sint_route
->staged_msg
) {
500 event_notifier_set_handler(&sint_route
->sint_ack_notifier
, NULL
);
501 event_notifier_cleanup(&sint_route
->sint_ack_notifier
);
502 g_free(sint_route
->staged_msg
);
507 int hyperv_sint_route_set_sint(HvSintRoute
*sint_route
)
509 if (!sint_route
->gsi
) {
513 return event_notifier_set(&sint_route
->sint_set_notifier
);
516 typedef struct MsgHandler
{
518 QLIST_ENTRY(MsgHandler
) link
;
520 HvMsgHandler handler
;
524 typedef struct EventFlagHandler
{
526 QLIST_ENTRY(EventFlagHandler
) link
;
528 EventNotifier
*notifier
;
531 static QLIST_HEAD(, MsgHandler
) msg_handlers
;
532 static QLIST_HEAD(, EventFlagHandler
) event_flag_handlers
;
533 static QemuMutex handlers_mutex
;
535 static void __attribute__((constructor
)) hv_init(void)
537 QLIST_INIT(&msg_handlers
);
538 QLIST_INIT(&event_flag_handlers
);
539 qemu_mutex_init(&handlers_mutex
);
542 int hyperv_set_msg_handler(uint32_t conn_id
, HvMsgHandler handler
, void *data
)
547 QEMU_LOCK_GUARD(&handlers_mutex
);
548 QLIST_FOREACH(mh
, &msg_handlers
, link
) {
549 if (mh
->conn_id
== conn_id
) {
553 QLIST_REMOVE_RCU(mh
, link
);
562 mh
= g_new(MsgHandler
, 1);
563 mh
->conn_id
= conn_id
;
564 mh
->handler
= handler
;
566 QLIST_INSERT_HEAD_RCU(&msg_handlers
, mh
, link
);
575 uint16_t hyperv_hcall_post_message(uint64_t param
, bool fast
)
579 struct hyperv_post_message_input
*msg
;
583 return HV_STATUS_INVALID_HYPERCALL_CODE
;
585 if (param
& (__alignof__(*msg
) - 1)) {
586 return HV_STATUS_INVALID_ALIGNMENT
;
590 msg
= cpu_physical_memory_map(param
, &len
, 0);
591 if (len
< sizeof(*msg
)) {
592 ret
= HV_STATUS_INSUFFICIENT_MEMORY
;
595 if (msg
->payload_size
> sizeof(msg
->payload
)) {
596 ret
= HV_STATUS_INVALID_HYPERCALL_INPUT
;
600 ret
= HV_STATUS_INVALID_CONNECTION_ID
;
601 WITH_RCU_READ_LOCK_GUARD() {
602 QLIST_FOREACH_RCU(mh
, &msg_handlers
, link
) {
603 if (mh
->conn_id
== (msg
->connection_id
& HV_CONNECTION_ID_MASK
)) {
604 ret
= mh
->handler(msg
, mh
->data
);
611 cpu_physical_memory_unmap(msg
, len
, 0, 0);
615 static int set_event_flag_handler(uint32_t conn_id
, EventNotifier
*notifier
)
618 EventFlagHandler
*handler
;
620 QEMU_LOCK_GUARD(&handlers_mutex
);
621 QLIST_FOREACH(handler
, &event_flag_handlers
, link
) {
622 if (handler
->conn_id
== conn_id
) {
626 QLIST_REMOVE_RCU(handler
, link
);
627 g_free_rcu(handler
, rcu
);
635 handler
= g_new(EventFlagHandler
, 1);
636 handler
->conn_id
= conn_id
;
637 handler
->notifier
= notifier
;
638 QLIST_INSERT_HEAD_RCU(&event_flag_handlers
, handler
, link
);
647 static bool process_event_flags_userspace
;
649 int hyperv_set_event_flag_handler(uint32_t conn_id
, EventNotifier
*notifier
)
651 if (!process_event_flags_userspace
&&
652 !kvm_check_extension(kvm_state
, KVM_CAP_HYPERV_EVENTFD
)) {
653 process_event_flags_userspace
= true;
655 warn_report("Hyper-V event signaling is not supported by this kernel; "
656 "using slower userspace hypercall processing");
659 if (!process_event_flags_userspace
) {
660 struct kvm_hyperv_eventfd hvevfd
= {
662 .fd
= notifier
? event_notifier_get_fd(notifier
) : -1,
663 .flags
= notifier
? 0 : KVM_HYPERV_EVENTFD_DEASSIGN
,
666 return kvm_vm_ioctl(kvm_state
, KVM_HYPERV_EVENTFD
, &hvevfd
);
668 return set_event_flag_handler(conn_id
, notifier
);
671 uint16_t hyperv_hcall_signal_event(uint64_t param
, bool fast
)
673 EventFlagHandler
*handler
;
675 if (unlikely(!fast
)) {
678 if (addr
& (__alignof__(addr
) - 1)) {
679 return HV_STATUS_INVALID_ALIGNMENT
;
682 param
= ldq_phys(&address_space_memory
, addr
);
686 * Per spec, bits 32-47 contain the extra "flag number". However, we
687 * have no use for it, and in all known usecases it is zero, so just
688 * report lookup failure if it isn't.
690 if (param
& 0xffff00000000ULL
) {
691 return HV_STATUS_INVALID_PORT_ID
;
693 /* remaining bits are reserved-zero */
694 if (param
& ~HV_CONNECTION_ID_MASK
) {
695 return HV_STATUS_INVALID_HYPERCALL_INPUT
;
698 RCU_READ_LOCK_GUARD();
699 QLIST_FOREACH_RCU(handler
, &event_flag_handlers
, link
) {
700 if (handler
->conn_id
== param
) {
701 event_notifier_set(handler
->notifier
);
705 return HV_STATUS_INVALID_CONNECTION_ID
;
708 static HvSynDbgHandler hv_syndbg_handler
;
709 static void *hv_syndbg_context
;
711 void hyperv_set_syndbg_handler(HvSynDbgHandler handler
, void *context
)
713 assert(!hv_syndbg_handler
);
714 hv_syndbg_handler
= handler
;
715 hv_syndbg_context
= context
;
718 uint16_t hyperv_hcall_reset_dbg_session(uint64_t outgpa
)
722 struct hyperv_reset_debug_session_output
*reset_dbg_session
= NULL
;
725 if (!hv_syndbg_handler
) {
726 ret
= HV_STATUS_INVALID_HYPERCALL_CODE
;
730 len
= sizeof(*reset_dbg_session
);
731 reset_dbg_session
= cpu_physical_memory_map(outgpa
, &len
, 1);
732 if (!reset_dbg_session
|| len
< sizeof(*reset_dbg_session
)) {
733 ret
= HV_STATUS_INSUFFICIENT_MEMORY
;
737 msg
.type
= HV_SYNDBG_MSG_CONNECTION_INFO
;
738 ret
= hv_syndbg_handler(hv_syndbg_context
, &msg
);
743 reset_dbg_session
->host_ip
= msg
.u
.connection_info
.host_ip
;
744 reset_dbg_session
->host_port
= msg
.u
.connection_info
.host_port
;
745 /* The following fields are only used as validation for KDVM */
746 memset(&reset_dbg_session
->host_mac
, 0,
747 sizeof(reset_dbg_session
->host_mac
));
748 reset_dbg_session
->target_ip
= msg
.u
.connection_info
.host_ip
;
749 reset_dbg_session
->target_port
= msg
.u
.connection_info
.host_port
;
750 memset(&reset_dbg_session
->target_mac
, 0,
751 sizeof(reset_dbg_session
->target_mac
));
753 if (reset_dbg_session
) {
754 cpu_physical_memory_unmap(reset_dbg_session
,
755 sizeof(*reset_dbg_session
), 1, len
);
761 uint16_t hyperv_hcall_retreive_dbg_data(uint64_t ingpa
, uint64_t outgpa
,
765 struct hyperv_retrieve_debug_data_input
*debug_data_in
= NULL
;
766 struct hyperv_retrieve_debug_data_output
*debug_data_out
= NULL
;
767 hwaddr in_len
, out_len
;
770 if (fast
|| !hv_syndbg_handler
) {
771 ret
= HV_STATUS_INVALID_HYPERCALL_CODE
;
775 in_len
= sizeof(*debug_data_in
);
776 debug_data_in
= cpu_physical_memory_map(ingpa
, &in_len
, 0);
777 if (!debug_data_in
|| in_len
< sizeof(*debug_data_in
)) {
778 ret
= HV_STATUS_INSUFFICIENT_MEMORY
;
782 out_len
= sizeof(*debug_data_out
);
783 debug_data_out
= cpu_physical_memory_map(outgpa
, &out_len
, 1);
784 if (!debug_data_out
|| out_len
< sizeof(*debug_data_out
)) {
785 ret
= HV_STATUS_INSUFFICIENT_MEMORY
;
789 msg
.type
= HV_SYNDBG_MSG_RECV
;
790 msg
.u
.recv
.buf_gpa
= outgpa
+ sizeof(*debug_data_out
);
791 msg
.u
.recv
.count
= TARGET_PAGE_SIZE
- sizeof(*debug_data_out
);
792 msg
.u
.recv
.options
= debug_data_in
->options
;
793 msg
.u
.recv
.timeout
= debug_data_in
->timeout
;
794 msg
.u
.recv
.is_raw
= true;
795 ret
= hv_syndbg_handler(hv_syndbg_context
, &msg
);
796 if (ret
== HV_STATUS_NO_DATA
) {
797 debug_data_out
->retrieved_count
= 0;
798 debug_data_out
->remaining_count
= debug_data_in
->count
;
800 } else if (ret
!= HV_STATUS_SUCCESS
) {
804 debug_data_out
->retrieved_count
= msg
.u
.recv
.retrieved_count
;
805 debug_data_out
->remaining_count
=
806 debug_data_in
->count
- msg
.u
.recv
.retrieved_count
;
808 if (debug_data_out
) {
809 cpu_physical_memory_unmap(debug_data_out
, sizeof(*debug_data_out
), 1,
814 cpu_physical_memory_unmap(debug_data_in
, sizeof(*debug_data_in
), 0,
821 uint16_t hyperv_hcall_post_dbg_data(uint64_t ingpa
, uint64_t outgpa
, bool fast
)
824 struct hyperv_post_debug_data_input
*post_data_in
= NULL
;
825 struct hyperv_post_debug_data_output
*post_data_out
= NULL
;
826 hwaddr in_len
, out_len
;
829 if (fast
|| !hv_syndbg_handler
) {
830 ret
= HV_STATUS_INVALID_HYPERCALL_CODE
;
834 in_len
= sizeof(*post_data_in
);
835 post_data_in
= cpu_physical_memory_map(ingpa
, &in_len
, 0);
836 if (!post_data_in
|| in_len
< sizeof(*post_data_in
)) {
837 ret
= HV_STATUS_INSUFFICIENT_MEMORY
;
841 if (post_data_in
->count
> TARGET_PAGE_SIZE
- sizeof(*post_data_in
)) {
842 ret
= HV_STATUS_INVALID_PARAMETER
;
846 out_len
= sizeof(*post_data_out
);
847 post_data_out
= cpu_physical_memory_map(outgpa
, &out_len
, 1);
848 if (!post_data_out
|| out_len
< sizeof(*post_data_out
)) {
849 ret
= HV_STATUS_INSUFFICIENT_MEMORY
;
853 msg
.type
= HV_SYNDBG_MSG_SEND
;
854 msg
.u
.send
.buf_gpa
= ingpa
+ sizeof(*post_data_in
);
855 msg
.u
.send
.count
= post_data_in
->count
;
856 msg
.u
.send
.is_raw
= true;
857 ret
= hv_syndbg_handler(hv_syndbg_context
, &msg
);
858 if (ret
!= HV_STATUS_SUCCESS
) {
862 post_data_out
->pending_count
= msg
.u
.send
.pending_count
;
863 ret
= post_data_out
->pending_count
? HV_STATUS_INSUFFICIENT_BUFFERS
:
867 cpu_physical_memory_unmap(post_data_out
,
868 sizeof(*post_data_out
), 1, out_len
);
872 cpu_physical_memory_unmap(post_data_in
,
873 sizeof(*post_data_in
), 0, in_len
);
879 uint32_t hyperv_syndbg_send(uint64_t ingpa
, uint32_t count
)
883 if (!hv_syndbg_handler
) {
884 return HV_SYNDBG_STATUS_INVALID
;
887 msg
.type
= HV_SYNDBG_MSG_SEND
;
888 msg
.u
.send
.buf_gpa
= ingpa
;
889 msg
.u
.send
.count
= count
;
890 msg
.u
.send
.is_raw
= false;
891 if (hv_syndbg_handler(hv_syndbg_context
, &msg
)) {
892 return HV_SYNDBG_STATUS_INVALID
;
895 return HV_SYNDBG_STATUS_SEND_SUCCESS
;
898 uint32_t hyperv_syndbg_recv(uint64_t ingpa
, uint32_t count
)
903 if (!hv_syndbg_handler
) {
904 return HV_SYNDBG_STATUS_INVALID
;
907 msg
.type
= HV_SYNDBG_MSG_RECV
;
908 msg
.u
.recv
.buf_gpa
= ingpa
;
909 msg
.u
.recv
.count
= count
;
910 msg
.u
.recv
.options
= 0;
911 msg
.u
.recv
.timeout
= 0;
912 msg
.u
.recv
.is_raw
= false;
913 ret
= hv_syndbg_handler(hv_syndbg_context
, &msg
);
914 if (ret
!= HV_STATUS_SUCCESS
) {
918 return HV_SYNDBG_STATUS_SET_SIZE(HV_SYNDBG_STATUS_RECV_SUCCESS
,
919 msg
.u
.recv
.retrieved_count
);
922 void hyperv_syndbg_set_pending_page(uint64_t ingpa
)
926 if (!hv_syndbg_handler
) {
930 msg
.type
= HV_SYNDBG_MSG_SET_PENDING_PAGE
;
931 msg
.u
.pending_page
.buf_gpa
= ingpa
;
932 hv_syndbg_handler(hv_syndbg_context
, &msg
);
935 uint64_t hyperv_syndbg_query_options(void)
939 if (!hv_syndbg_handler
) {
943 msg
.type
= HV_SYNDBG_MSG_QUERY_OPTIONS
;
944 if (hv_syndbg_handler(hv_syndbg_context
, &msg
) != HV_STATUS_SUCCESS
) {
948 return msg
.u
.query_options
.options
;