/*
 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 */
#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>

#include "visorbus_private.h"
#include "vmcallinterface.h"
#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c

#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)

#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000

#define UNISYS_SPAR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_SPAR_ID_EBX 0x73696e55
#define UNISYS_SPAR_ID_ECX 0x70537379
#define UNISYS_SPAR_ID_EDX 0x34367261
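/*
 * Decoded as little-endian ASCII, ebx 0x73696e55 is "Unis", ecx 0x70537379
 * is "ysSp", and edx 0x34367261 is "ar64" -- together the "UnisysSpar64"
 * signature checked in visorutil_spar_detect() below.
 */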
static int visorchipset_major;

static int
visorchipset_open(struct inode *inode, struct file *file)
{
	unsigned int minor_number = iminor(inode);

	if (minor_number)
		return -ENODEV;
	return 0;
}

static int
visorchipset_release(struct inode *inode, struct file *file)
{
	return 0;
}
/*
 * When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode. As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
/* when we got our last controlvm message */
static unsigned long most_recent_message_jiffies;
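/*
 * In effect: poll every jiffy (POLLJIFFIES_CONTROLVMCHANNEL_FAST) while
 * messages are arriving, and drop to every 100 jiffies
 * (POLLJIFFIES_CONTROLVMCHANNEL_SLOW) once the channel has been quiet for
 * MIN_IDLE_SECONDS; see controlvm_periodic_work() below.
 */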
struct parser_context {
	unsigned long allocbytes;
	unsigned long param_bytes;
	u8 *curr;
	unsigned long bytes_remaining;
	bool byte_stream;
	char data[0];
};
static struct delayed_work periodic_controlvm_work;

static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;

static struct visorchannel *controlvm_channel;

/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
	u8 *ptr;	/* pointer to base address of payload pool */
	u64 offset;	/*
			 * offset from beginning of controlvm
			 * channel to beginning of payload pool
			 */
	u32 bytes;	/* number of bytes in payload pool */
};

static struct visor_controlvm_payload_info controlvm_payload_info;
static unsigned long controlvm_payload_bytes_buffered;
/*
 * The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements. In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid;
/*
 * This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
	/* a payload from a controlvm message, containing a file data buffer */
	struct parser_context *parser_ctx;
	/* points within data area of parser_ctx to next byte of data */
	size_t bytes_remaining;
};
#define PUTFILE_REQUEST_SIG 0x0906101302281211
/*
 * This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation. Structs of this type are dynamically linked into
 * <Putfile_request_list>.
 */
struct putfile_request {
	u64 sig;	/* PUTFILE_REQUEST_SIG */

	/* header from original TransmitFile request */
	struct controlvm_message_header controlvm_header;

	/* link to next struct putfile_request */
	struct list_head next_putfile_request;

	/*
	 * head of putfile_buffer_entry list, which describes the data to be
	 * supplied as putfile data;
	 * - this list is added to when controlvm messages come in that supply
	 *   file data
	 * - this list is removed from via the hotplug program that is actually
	 *   consuming these buffers to write as file data
	 */
	struct list_head input_buffer_list;
	spinlock_t req_list_lock;	/* lock for input_buffer_list */

	/* waiters for input_buffer_list to go non-empty */
	wait_queue_head_t input_buffer_wq;

	/* data not yet read within current putfile_buffer_entry */
	struct putfile_active_buffer active_buf;

	/*
	 * <0 = failed, 0 = in-progress, >0 = successful;
	 * note that this must be set with req_list_lock, and if you set <0,
	 * it is your responsibility to also free up all of the other objects
	 * in this struct (like input_buffer_list, active_buf.parser_ctx)
	 * before releasing the lock
	 */
	int completion_status;
};
struct parahotplug_request {
	struct list_head list;
	int id;
	unsigned long expiration;
	struct controlvm_message msg;
};

/* info for /dev/visorchipset */
static dev_t major_dev = -1; /* indicates major num for device */
/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 tool_action = 0;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   tool_action), &tool_action, sizeof(u8));
	return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u8 tool_action;
	int ret;

	if (kstrtou8(buf, 10, &tool_action))
		return -EINVAL;

	ret = visorchannel_write
		(controlvm_channel,
		 offsetof(struct spar_controlvm_channel_protocol,
			  tool_action),
		 &tool_action, sizeof(u8));

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(toolaction);
static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct efi_spar_indication efi_spar_indication;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   efi_spar_ind), &efi_spar_indication,
			  sizeof(struct efi_spar_indication));
	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 efi_spar_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int val, ret;
	struct efi_spar_indication efi_spar_indication;

	if (kstrtoint(buf, 10, &val))
		return -EINVAL;

	efi_spar_indication.boot_to_tool = val;
	ret = visorchannel_write
		(controlvm_channel,
		 offsetof(struct spar_controlvm_channel_protocol,
			  efi_spar_ind), &(efi_spar_indication),
		 sizeof(struct efi_spar_indication));

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(boottotool);
static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	u32 error = 0;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_error),
			  &error, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u32 error;
	int ret;

	if (kstrtou32(buf, 10, &error))
		return -EINVAL;

	ret = visorchannel_write
		(controlvm_channel,
		 offsetof(struct spar_controlvm_channel_protocol,
			  installation_error),
		 &error, sizeof(u32));

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(error);
static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	u32 text_id = 0;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_text_id),
			  &text_id, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	u32 text_id;
	int ret;

	if (kstrtou32(buf, 10, &text_id))
		return -EINVAL;

	ret = visorchannel_write
		(controlvm_channel,
		 offsetof(struct spar_controlvm_channel_protocol,
			  installation_text_id),
		 &text_id, sizeof(u32));

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(textid);
static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	u16 remaining_steps = 0;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_remaining_steps),
			  &remaining_steps, sizeof(u16));
	return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	u16 remaining_steps;
	int ret;

	if (kstrtou16(buf, 10, &remaining_steps))
		return -EINVAL;

	ret = visorchannel_write
		(controlvm_channel,
		 offsetof(struct spar_controlvm_channel_protocol,
			  installation_remaining_steps),
		 &remaining_steps, sizeof(u16));

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(remaining_steps);
static uuid_le
parser_id_get(struct parser_context *ctx)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (!ctx)
		return NULL_UUID_LE;
	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	return phdr->id;
}
/*
 * Describes the state from the perspective of which controlvm messages have
 * been received for a bus or device.
 */
enum PARSER_WHICH_STRING {
	PARSERSTRING_INITIATOR,
	PARSERSTRING_TARGET,
	PARSERSTRING_CONNECTION,
	PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
};

static void
parser_param_start(struct parser_context *ctx,
		   enum PARSER_WHICH_STRING which_string)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (!ctx)
		return;

	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	switch (which_string) {
	case PARSERSTRING_INITIATOR:
		ctx->curr = ctx->data + phdr->initiator_offset;
		ctx->bytes_remaining = phdr->initiator_length;
		break;
	case PARSERSTRING_TARGET:
		ctx->curr = ctx->data + phdr->target_offset;
		ctx->bytes_remaining = phdr->target_length;
		break;
	case PARSERSTRING_CONNECTION:
		ctx->curr = ctx->data + phdr->connection_offset;
		ctx->bytes_remaining = phdr->connection_length;
		break;
	case PARSERSTRING_NAME:
		ctx->curr = ctx->data + phdr->name_offset;
		ctx->bytes_remaining = phdr->name_length;
		break;
	default:
		break;
	}
}
static void parser_done(struct parser_context *ctx)
{
	if (!ctx)
		return;
	controlvm_payload_bytes_buffered -= ctx->param_bytes;
	kfree(ctx);
}
static char *
parser_string_get(struct parser_context *ctx)
{
	u8 *pscan;
	unsigned long nscan;
	int value_length = -1;
	void *value = NULL;
	int i;

	if (!ctx)
		return NULL;
	pscan = ctx->curr;
	nscan = ctx->bytes_remaining;
	if (nscan == 0)
		return NULL;
	if (!pscan)
		return NULL;
	for (i = 0, value_length = -1; i < nscan; i++)
		if (pscan[i] == '\0') {
			value_length = i;
			break;
		}
	if (value_length < 0)	/* '\0' was not included in the length */
		value_length = nscan;
	value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
	if (!value)
		return NULL;
	if (value_length > 0)
		memcpy(value, pscan, value_length);
	((u8 *)(value))[value_length] = '\0';
	return value;
}
struct visor_busdev {
	u32 bus_no;
	u32 dev_no;
};

static int match_visorbus_dev_by_id(struct device *dev, void *data)
{
	struct visor_device *vdev = to_visor_device(dev);
	struct visor_busdev *id = data;
	u32 bus_no = id->bus_no;
	u32 dev_no = id->dev_no;

	if ((vdev->chipset_bus_no == bus_no) &&
	    (vdev->chipset_dev_no == dev_no))
		return 1;

	return 0;
}

struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
					       struct visor_device *from)
{
	struct device *dev;
	struct device *dev_start = NULL;
	struct visor_device *vdev = NULL;
	struct visor_busdev id = {
		.bus_no = bus_no,
		.dev_no = dev_no
	};

	if (from)
		dev_start = &from->device;
	dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
			      match_visorbus_dev_by_id);
	if (dev)
		vdev = to_visor_device(dev);
	return vdev;
}
static void
controlvm_init_response(struct controlvm_message *msg,
			struct controlvm_message_header *msg_hdr, int response)
{
	memset(msg, 0, sizeof(struct controlvm_message));
	memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
	msg->hdr.payload_bytes = 0;
	msg->hdr.payload_vm_offset = 0;
	msg->hdr.payload_max_bytes = 0;
	if (response < 0) {
		msg->hdr.flags.failed = 1;
		msg->hdr.completion_status = (u32)(-response);
	}
}
static int
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
			       int response,
			       enum ultra_chipset_feature features)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.init_chipset.features = features;
	return visorchannel_signalinsert(controlvm_channel,
					 CONTROLVM_QUEUE_REQUEST, &outmsg);
}
static int
chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;
	enum ultra_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;
	int res = 0;

	POSTCODE_LINUX(CHIPSET_INIT_ENTRY_PC, 0, 0, POSTCODE_SEVERITY_INFO);
	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		res = -EIO;
		goto out_respond;
	}
	chipset_inited = 1;
	POSTCODE_LINUX(CHIPSET_INIT_EXIT_PC, 0, 0, POSTCODE_SEVERITY_INFO);

	/*
	 * Set features to indicate we support parahotplug (if Command
	 * also supports it).
	 */
	features = inmsg->cmd.init_chipset.features &
		   ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

	/*
	 * Set the "reply" bit so Command knows this is a
	 * features-aware driver.
	 */
	features |= ULTRA_CHIPSET_FEATURE_REPLY;

out_respond:
	if (inmsg->hdr.flags.response_expected)
		res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);

	return res;
}
static int
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	if (outmsg.hdr.flags.test_message == 1)
		return -EINVAL;

	return visorchannel_signalinsert(controlvm_channel,
					 CONTROLVM_QUEUE_REQUEST, &outmsg);
}
static int controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.device_change_state.state = state;
	outmsg.cmd.device_change_state.flags.phys_device = 1;
	return visorchannel_signalinsert(controlvm_channel,
					 CONTROLVM_QUEUE_REQUEST, &outmsg);
}
enum crash_obj_type {
	CRASH_DEV,
	CRASH_BUS,
};

static int
save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
{
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;
	int err;

	err = visorchannel_read(controlvm_channel,
				offsetof(struct spar_controlvm_channel_protocol,
					 saved_crash_message_count),
				&local_crash_msg_count, sizeof(u16));
	if (err) {
		POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
			       POSTCODE_SEVERITY_ERR);
		return err;
	}

	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
			       local_crash_msg_count,
			       POSTCODE_SEVERITY_ERR);
		return -EIO;
	}

	err = visorchannel_read(controlvm_channel,
				offsetof(struct spar_controlvm_channel_protocol,
					 saved_crash_message_offset),
				&local_crash_msg_offset, sizeof(u32));
	if (err) {
		POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
			       POSTCODE_SEVERITY_ERR);
		return err;
	}

	if (typ == CRASH_BUS) {
		err = visorchannel_write(controlvm_channel,
					 local_crash_msg_offset,
					 msg,
					 sizeof(struct controlvm_message));
		if (err) {
			POSTCODE_LINUX(SAVE_MSG_BUS_FAILURE_PC, 0, 0,
				       POSTCODE_SEVERITY_ERR);
			return err;
		}
	} else {
		local_crash_msg_offset += sizeof(struct controlvm_message);
		err = visorchannel_write(controlvm_channel,
					 local_crash_msg_offset,
					 msg,
					 sizeof(struct controlvm_message));
		if (err) {
			POSTCODE_LINUX(SAVE_MSG_DEV_FAILURE_PC, 0, 0,
				       POSTCODE_SEVERITY_ERR);
			return err;
		}
	}
	return 0;
}
static int
bus_responder(enum controlvm_id cmd_id,
	      struct controlvm_message_header *pending_msg_hdr,
	      int response)
{
	if (!pending_msg_hdr)
		return -EIO;

	if (pending_msg_hdr->id != (u32)cmd_id)
		return -EINVAL;

	return controlvm_respond(pending_msg_hdr, response);
}
static int
device_changestate_responder(enum controlvm_id cmd_id,
			     struct visor_device *p, int response,
			     struct spar_segment_state response_state)
{
	struct controlvm_message outmsg;
	u32 bus_no = p->chipset_bus_no;
	u32 dev_no = p->chipset_dev_no;

	if (!p->pending_msg_hdr)
		return -EIO;
	if (p->pending_msg_hdr->id != cmd_id)
		return -EINVAL;

	controlvm_init_response(&outmsg, p->pending_msg_hdr, response);

	outmsg.cmd.device_change_state.bus_no = bus_no;
	outmsg.cmd.device_change_state.dev_no = dev_no;
	outmsg.cmd.device_change_state.state = response_state;

	return visorchannel_signalinsert(controlvm_channel,
					 CONTROLVM_QUEUE_REQUEST, &outmsg);
}
static int
device_responder(enum controlvm_id cmd_id,
		 struct controlvm_message_header *pending_msg_hdr,
		 int response)
{
	if (!pending_msg_hdr)
		return -EIO;

	if (pending_msg_hdr->id != (u32)cmd_id)
		return -EINVAL;

	return controlvm_respond(pending_msg_hdr, response);
}
static void
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr = NULL;
	u32 bus_no = cmd->create_bus.bus_no;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int err;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (bus_info && (bus_info->state.created == 1)) {
		POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
			       POSTCODE_SEVERITY_ERR);
		err = -EEXIST;
		goto err_respond;
	}

	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
			       POSTCODE_SEVERITY_ERR);
		err = -ENOMEM;
		goto err_respond;
	}

	INIT_LIST_HEAD(&bus_info->list_all);
	bus_info->chipset_bus_no = bus_no;
	bus_info->chipset_dev_no = BUS_ROOT_DEVICE;

	POSTCODE_LINUX(BUS_CREATE_ENTRY_PC, 0, bus_no, POSTCODE_SEVERITY_INFO);

	if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0)
		save_crash_message(inmsg, CRASH_BUS);

	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr),
				   GFP_KERNEL);
		if (!pmsg_hdr) {
			POSTCODE_LINUX_4(MALLOC_FAILURE_PC, cmd,
					 bus_info->chipset_bus_no,
					 POSTCODE_SEVERITY_ERR);
			err = -ENOMEM;
			goto err_free_bus_info;
		}

		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}

	visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
					   cmd->create_bus.channel_bytes,
					   GFP_KERNEL,
					   cmd->create_bus.bus_data_type_uuid);
	if (!visorchannel) {
		POSTCODE_LINUX(BUS_CREATE_FAILURE_PC, 0, bus_no,
			       POSTCODE_SEVERITY_ERR);
		err = -ENOMEM;
		goto err_free_pending_msg;
	}
	bus_info->visorchannel = visorchannel;

	/* Response will be handled by chipset_bus_create */
	chipset_bus_create(bus_info);

	POSTCODE_LINUX(BUS_CREATE_EXIT_PC, 0, bus_no, POSTCODE_SEVERITY_INFO);
	return;

err_free_pending_msg:
	kfree(bus_info->pending_msg_hdr);

err_free_bus_info:
	kfree(bus_info);

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
}
static void
bus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr = NULL;
	u32 bus_no = cmd->destroy_bus.bus_no;
	struct visor_device *bus_info;
	int err;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		err = -ENODEV;
		goto err_respond;
	}
	if (bus_info->state.created == 0) {
		err = -ENOENT;
		goto err_respond;
	}
	if (bus_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		err = -EEXIST;
		goto err_respond;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			POSTCODE_LINUX_4(MALLOC_FAILURE_PC, cmd,
					 bus_info->chipset_bus_no,
					 POSTCODE_SEVERITY_ERR);
			err = -ENOMEM;
			goto err_respond;
		}

		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}

	/* Response will be handled by chipset_bus_destroy */
	chipset_bus_destroy(bus_info);
	return;

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
}
static void
bus_configure(struct controlvm_message *inmsg,
	      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no;
	struct visor_device *bus_info;
	int err = 0;

	bus_no = cmd->configure_bus.bus_no;
	POSTCODE_LINUX(BUS_CONFIGURE_ENTRY_PC, 0, bus_no,
		       POSTCODE_SEVERITY_INFO);

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
			       POSTCODE_SEVERITY_ERR);
		err = -EINVAL;
		goto err_respond;
	} else if (bus_info->state.created == 0) {
		POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
			       POSTCODE_SEVERITY_ERR);
		err = -EINVAL;
		goto err_respond;
	} else if (bus_info->pending_msg_hdr) {
		POSTCODE_LINUX(BUS_CONFIGURE_FAILURE_PC, 0, bus_no,
			       POSTCODE_SEVERITY_ERR);
		err = -EIO;
		goto err_respond;
	}

	err = visorchannel_set_clientpartition
		(bus_info->visorchannel,
		 cmd->configure_bus.guest_handle);
	if (err)
		goto err_respond;

	bus_info->partition_uuid = parser_id_get(parser_ctx);
	parser_param_start(parser_ctx, PARSERSTRING_NAME);
	bus_info->name = parser_string_get(parser_ctx);

	POSTCODE_LINUX(BUS_CONFIGURE_EXIT_PC, 0, bus_no,
		       POSTCODE_SEVERITY_INFO);

	if (inmsg->hdr.flags.response_expected == 1)
		bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return;

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		bus_responder(inmsg->hdr.id, &inmsg->hdr, err);
}
static void
my_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr = NULL;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visor_device *dev_info = NULL;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto out_respond;
	}

	if (bus_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto out_respond;
	}

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (dev_info && (dev_info->state.created == 1)) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto out_respond;
	}

	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto out_respond;
	}

	dev_info->chipset_bus_no = bus_no;
	dev_info->chipset_dev_no = dev_no;
	dev_info->inst = cmd->create_device.dev_inst_uuid;

	/* not sure where the best place to set the 'parent' */
	dev_info->device.parent = &bus_info->device;

	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	visorchannel =
	       visorchannel_create_with_lock(cmd->create_device.channel_addr,
					     cmd->create_device.channel_bytes,
					     GFP_KERNEL,
					     cmd->create_device.data_type_uuid);
	if (!visorchannel) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto out_free_dev_info;
	}
	dev_info->visorchannel = visorchannel;
	dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
	if (uuid_le_cmp(cmd->create_device.data_type_uuid,
			spar_vhba_channel_protocol_uuid) == 0)
		save_crash_message(inmsg, CRASH_DEV);

	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
			goto out_free_dev_info;
		}

		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}
	/* Chipset_device_create will send response */
	chipset_device_create(dev_info);
	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
	return;

out_free_dev_info:
	kfree(dev_info);

out_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		device_responder(inmsg->hdr.id, &inmsg->hdr, rc);
}
static void
my_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr = NULL;
	u32 bus_no = cmd->device_change_state.bus_no;
	u32 dev_no = cmd->device_change_state.dev_no;
	struct spar_segment_state state = cmd->device_change_state.state;
	struct visor_device *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
		goto err_respond;
	}
	if (dev_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
		goto err_respond;
	}
	if (dev_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
		goto err_respond;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
			goto err_respond;
		}

		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}

	if (state.alive == segment_state_running.alive &&
	    state.operating == segment_state_running.operating)
		/* Response will be sent from chipset_device_resume */
		chipset_device_resume(dev_info);
	/* ServerNotReady / ServerLost / SegmentStateStandby */
	else if (state.alive == segment_state_standby.alive &&
		 state.operating == segment_state_standby.operating)
		/*
		 * technically this is standby case where server is lost.
		 * Response will be sent from chipset_device_pause.
		 */
		chipset_device_pause(dev_info);
	return;

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		device_responder(inmsg->hdr.id, &inmsg->hdr, rc);
}
static void
my_device_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr = NULL;
	u32 bus_no = cmd->destroy_device.bus_no;
	u32 dev_no = cmd->destroy_device.dev_no;
	struct visor_device *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info) {
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
		goto err_respond;
	}
	if (dev_info->state.created == 0) {
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto err_respond;
	}

	if (dev_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
		goto err_respond;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
			goto err_respond;
		}

		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}

	/* Response will be handled by chipset_device_destroy */
	chipset_device_destroy(dev_info);
	return;

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		device_responder(inmsg->hdr.id, &inmsg->hdr, rc);
}
/**
 * initialize_controlvm_payload_info() - init controlvm_payload_info struct
 * @phys_addr: the physical address of controlvm channel
 * @offset:    the offset to payload
 * @bytes:     the size of the payload in bytes
 * @info:      the returning valid struct
 *
 * When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.
 *
 * Return: CONTROLVM_RESP_SUCCESS for success or a negative for failure
 */
static int
initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
				  struct visor_controlvm_payload_info *info)
{
	u8 *payload = NULL;

	if (!info)
		return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;

	if ((offset == 0) || (bytes == 0))
		return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;

	payload = memremap(phys_addr + offset, bytes, MEMREMAP_WB);
	if (!payload)
		return -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;

	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
	info->offset = offset;
	info->bytes = bytes;
	info->ptr = payload;

	return CONTROLVM_RESP_SUCCESS;
}
static void
destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
{
	if (info->ptr) {
		memunmap(info->ptr);
		info->ptr = NULL;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
}
static void
initialize_controlvm_payload(void)
{
	u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
	u64 payload_offset = 0;
	u32 payload_bytes = 0;

	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_offset),
			      &payload_offset, sizeof(payload_offset)) < 0) {
		POSTCODE_LINUX(CONTROLVM_INIT_FAILURE_PC, 0, 0,
			       POSTCODE_SEVERITY_ERR);
		return;
	}
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_bytes),
			      &payload_bytes, sizeof(payload_bytes)) < 0) {
		POSTCODE_LINUX(CONTROLVM_INIT_FAILURE_PC, 0, 0,
			       POSTCODE_SEVERITY_ERR);
		return;
	}
	initialize_controlvm_payload_info(phys_addr,
					  payload_offset, payload_bytes,
					  &controlvm_payload_info);
}
/*
 * The general parahotplug flow works as follows. The visorchipset
 * driver receives a DEVICE_CHANGESTATE message from Command
 * specifying a physical device to enable or disable. The CONTROLVM
 * message handler calls parahotplug_process_message, which then adds
 * the message to a global list and kicks off a udev event which
 * causes a user level script to enable or disable the specified
 * device. The udev script then writes to the parahotplug/devicedisabled
 * (or deviceenabled) sysfs attribute, which causes the corresponding
 * store handler to be called, at which point the appropriate CONTROLVM
 * message is retrieved from the list and responded to.
 */

#define PARAHOTPLUG_TIMEOUT_MS 2000
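/*
 * A minimal sketch of the completing write (the exact support script is not
 * part of this driver; the path assumes the standard platform-device sysfs
 * layout):
 *
 *   echo "$SPAR_PARAHOTPLUG_ID" > \
 *       /sys/devices/platform/visorchipset/parahotplug/devicedisabled
 */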
/**
 * parahotplug_next_id() - generate unique int to match an outstanding
 *                         CONTROLVM message with a udev script response
 *
 * Return: a unique integer value
 */
static int
parahotplug_next_id(void)
{
	static atomic_t id = ATOMIC_INIT(0);

	return atomic_inc_return(&id);
}
/**
 * parahotplug_next_expiration() - returns the time (in jiffies) when a
 *                                 CONTROLVM message on the list should expire
 *                                 -- PARAHOTPLUG_TIMEOUT_MS in the future
 *
 * Return: expected expiration time (in jiffies)
 */
static unsigned long
parahotplug_next_expiration(void)
{
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}
/**
 * parahotplug_request_create() - create a parahotplug_request, which is
 *                                basically a wrapper for a CONTROLVM_MESSAGE
 *                                that we can stick on a list
 * @msg: the message to insert in the request
 *
 * Return: the request containing the provided message
 */
static struct parahotplug_request *
parahotplug_request_create(struct controlvm_message *msg)
{
	struct parahotplug_request *req;

	req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
	if (!req)
		return NULL;

	req->id = parahotplug_next_id();
	req->expiration = parahotplug_next_expiration();
	req->msg = *msg;

	return req;
}
/**
 * parahotplug_request_destroy() - free a parahotplug_request
 * @req: the request to deallocate
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
/**
 * parahotplug_request_complete() - mark request as complete
 * @id:     the id of the request
 * @active: indicates whether the request is assigned to active partition
 *
 * Called from the sysfs store handler, which means the user script has
 * finished the enable/disable. Find the matching identifier, and
 * respond to the CONTROLVM message with success.
 *
 * Return: 0 on success or -EINVAL on failure
 */
static int
parahotplug_request_complete(int id, u16 active)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	/* Look for a request matching "id". */
	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);
		if (req->id == id) {
			/*
			 * Found a match. Remove it from the list and
			 * respond.
			 */
			list_del(pos);
			spin_unlock(&parahotplug_request_list_lock);
			req->msg.cmd.device_change_state.state.active = active;
			if (req->msg.hdr.flags.response_expected)
				controlvm_respond_physdev_changestate(
					&req->msg.hdr, CONTROLVM_RESP_SUCCESS,
					req->msg.cmd.device_change_state.state);
			parahotplug_request_destroy(req);
			return 0;
		}
	}

	spin_unlock(&parahotplug_request_list_lock);
	return -EINVAL;
}
/**
 * devicedisabled_store() - disables the hotplug device
 * @dev:   sysfs interface variable not utilized in this function
 * @attr:  sysfs interface variable not utilized in this function
 * @buf:   buffer containing the device id
 * @count: the size of the buffer
 *
 * The parahotplug/devicedisabled interface gets called by our support script
 * when an SR-IOV device has been shut down. The ID is passed to the script
 * and then passed back when the device has been removed.
 *
 * Return: the size of the buffer for success or negative for error
 */
static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	unsigned int id;
	int err;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;

	err = parahotplug_request_complete(id, 0);
	if (err < 0)
		return err;
	return count;
}
static DEVICE_ATTR_WO(devicedisabled);
/**
 * deviceenabled_store() - enables the hotplug device
 * @dev:   sysfs interface variable not utilized in this function
 * @attr:  sysfs interface variable not utilized in this function
 * @buf:   buffer containing the device id
 * @count: the size of the buffer
 *
 * The parahotplug/deviceenabled interface gets called by our support script
 * when an SR-IOV device has been recovered. The ID is passed to the script
 * and then passed back when the device has been brought back up.
 *
 * Return: the size of the buffer for success or negative for error
 */
static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int id;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;

	parahotplug_request_complete(id, 1);
	return count;
}
static DEVICE_ATTR_WO(deviceenabled);
static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static const struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_parahotplug_group,
	NULL
};

static void visorchipset_dev_release(struct device *dev)
{
}

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
	.name = "visorchipset",
	.id = -1,
	.dev.groups = visorchipset_dev_groups,
	.dev.release = visorchipset_dev_release,
};
/**
 * parahotplug_request_kickoff() - initiate parahotplug request
 * @req: the request to initiate
 *
 * Cause uevent to run the user level script to do the disable/enable specified
 * in the parahotplug_request.
 */
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
	     env_func[40];
	char *envp[] = {
		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
	};

	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
		cmd->device_change_state.state.active);
	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
		cmd->device_change_state.bus_no);
	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
		cmd->device_change_state.dev_no >> 3);
	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
		cmd->device_change_state.dev_no & 0x7);

	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
}
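/*
 * Note: dev_no above packs a PCI-style devfn, so dev_no >> 3 is the device
 * number and dev_no & 0x7 is the function number -- which is how the DEVICE
 * and FUNCTION environment variables split it for the script.
 */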
/**
 * parahotplug_process_message() - enables or disables a PCI device by kicking
 *                                 off a udev script
 * @inmsg: the message indicating whether to enable or disable
 */
static void
parahotplug_process_message(struct controlvm_message *inmsg)
{
	struct parahotplug_request *req;

	req = parahotplug_request_create(inmsg);
	if (!req)
		return;

	if (inmsg->cmd.device_change_state.state.active) {
		/*
		 * For enable messages, just respond with success
		 * right away. This is a bit of a hack, but there are
		 * issues with the early enable messages we get (with
		 * either the udev script not detecting that the device
		 * is up, or not getting called at all). Fortunately
		 * the messages that get lost don't matter anyway, as
		 * devices are automatically enabled at
		 * initialization.
		 */
		parahotplug_request_kickoff(req);
		controlvm_respond_physdev_changestate
			(&inmsg->hdr,
			 CONTROLVM_RESP_SUCCESS,
			 inmsg->cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	} else {
		/*
		 * For disable messages, add the request to the
		 * request list before kicking off the udev script. It
		 * won't get responded to until the script has
		 * indicated it's done.
		 */
		spin_lock(&parahotplug_request_list_lock);
		list_add_tail(&req->list, &parahotplug_request_list);
		spin_unlock(&parahotplug_request_list_lock);

		parahotplug_request_kickoff(req);
	}
}
/**
 * visorchipset_chipset_ready() - sends chipset_ready action
 *
 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 *
 * Return: CONTROLVM_RESP_SUCCESS
 */
static int
visorchipset_chipset_ready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
	return CONTROLVM_RESP_SUCCESS;
}
static int
visorchipset_chipset_selftest(void)
{
	char env_selftest[20];
	char *envp[] = { env_selftest, NULL };

	sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
	return CONTROLVM_RESP_SUCCESS;
}
/**
 * visorchipset_chipset_notready() - sends chipset_notready action
 *
 * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 *
 * Return: CONTROLVM_RESP_SUCCESS
 */
static int
visorchipset_chipset_notready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
	return CONTROLVM_RESP_SUCCESS;
}
static void
chipset_ready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_ready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}

static void
chipset_selftest(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_selftest();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}

static void
chipset_notready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_notready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}
static inline unsigned int
issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
{
	struct vmcall_io_controlvm_addr_params params;
	int result = VMCALL_SUCCESS;
	u64 physaddr;

	physaddr = virt_to_phys(&params);
	ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
	if (VMCALL_SUCCESSFUL(result)) {
		*control_addr = params.address;
		*control_bytes = params.channel_bytes;
	}
	return result;
}
static u64 controlvm_get_channel_address(void)
{
	u64 addr = 0;
	u32 size = 0;

	if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
		return 0;

	return addr;
}
static void
setup_crash_devices_work_queue(struct work_struct *work)
{
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg;
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	POSTCODE_LINUX(CRASH_DEV_ENTRY_PC, 0, 0, POSTCODE_SEVERITY_INFO);

	/* send init chipset msg */
	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
	msg.cmd.init_chipset.bus_count = 23;
	msg.cmd.init_chipset.switch_count = 0;

	chipset_init(&msg);

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
			       POSTCODE_SEVERITY_ERR);
		return;
	}

	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX(CRASH_DEV_COUNT_FAILURE_PC, 0,
			       local_crash_msg_count,
			       POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX(CRASH_DEV_CTRL_RD_FAILURE_PC, 0, 0,
			       POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage bus offset */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX(CRASH_DEV_RD_BUS_FAIULRE_PC, 0, 0,
			       POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage device */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX(CRASH_DEV_RD_DEV_FAIULRE_PC, 0, 0,
			       POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse IOVM create bus message */
	if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
		bus_create(&local_crash_bus_msg);
	} else {
		POSTCODE_LINUX(CRASH_DEV_BUS_NULL_FAILURE_PC, 0, 0,
			       POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse create device message for storage device */
	if (local_crash_dev_msg.cmd.create_device.channel_addr) {
		my_device_create(&local_crash_dev_msg);
	} else {
		POSTCODE_LINUX(CRASH_DEV_DEV_NULL_FAILURE_PC, 0, 0,
			       POSTCODE_SEVERITY_ERR);
		return;
	}
	POSTCODE_LINUX(CRASH_DEV_EXIT_PC, 0, 0, POSTCODE_SEVERITY_INFO);
}
static void
bus_create_response(struct visor_device *bus_info, int response)
{
	if (response >= 0)
		bus_info->state.created = 1;

	bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
		      response);

	kfree(bus_info->pending_msg_hdr);
	bus_info->pending_msg_hdr = NULL;
}

static void
bus_destroy_response(struct visor_device *bus_info, int response)
{
	bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
		      response);

	kfree(bus_info->pending_msg_hdr);
	bus_info->pending_msg_hdr = NULL;
}

static void
device_create_response(struct visor_device *dev_info, int response)
{
	if (response >= 0)
		dev_info->state.created = 1;

	device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
			 response);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}

static void
device_destroy_response(struct visor_device *dev_info, int response)
{
	device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
			 response);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}

static void
device_pause_response(struct visor_device *dev_info,
		      int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_standby);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}

static void
device_resume_response(struct visor_device *dev_info, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_running);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}
static int
visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long physaddr = 0;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	u64 addr = 0;

	/* sv_enable_dfp(); */
	if (offset & (PAGE_SIZE - 1))
		return -ENXIO;	/* need aligned offsets */

	switch (offset) {
	case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
		vma->vm_flags |= VM_IO;
		if (!*file_controlvm_channel)
			return -ENXIO;

		visorchannel_read
			(*file_controlvm_channel,
			 offsetof(struct spar_controlvm_channel_protocol,
				  gp_control_channel),
			 &addr, sizeof(addr));
		if (!addr)
			return -ENXIO;

		physaddr = (unsigned long)addr;
		if (remap_pfn_range(vma, vma->vm_start,
				    physaddr >> PAGE_SHIFT,
				    vma->vm_end - vma->vm_start,
				    /*pgprot_noncached */
				    (vma->vm_page_prot))) {
			return -EAGAIN;
		}
		break;
	default:
		return -ENXIO;
	}
	return 0;
}
static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
{
	u64 result = VMCALL_SUCCESS;
	u64 physaddr = 0;

	ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
			result);
	return result;
}

static inline int issue_vmcall_update_physical_time(u64 adjustment)
{
	int result = VMCALL_SUCCESS;

	ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
	return result;
}
static long visorchipset_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	u64 adjustment;
	s64 vrtc_offset;

	switch (cmd) {
	case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
		/* get the physical rtc offset */
		vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
		if (copy_to_user((void __user *)arg, &vrtc_offset,
				 sizeof(vrtc_offset))) {
			return -EFAULT;
		}
		return 0;
	case VMCALL_UPDATE_PHYSICAL_TIME:
		if (copy_from_user(&adjustment, (void __user *)arg,
				   sizeof(adjustment))) {
			return -EFAULT;
		}
		return issue_vmcall_update_physical_time(adjustment);
	default:
		return -EFAULT;
	}
}
static const struct file_operations visorchipset_fops = {
	.owner = THIS_MODULE,
	.open = visorchipset_open,
	.read = NULL,
	.write = NULL,
	.unlocked_ioctl = visorchipset_ioctl,
	.release = visorchipset_release,
	.mmap = visorchipset_mmap,
};
static int
visorchipset_file_init(dev_t major_dev,
		       struct visorchannel **controlvm_channel)
{
	int rc = 0;

	file_controlvm_channel = controlvm_channel;
	cdev_init(&file_cdev, &visorchipset_fops);
	file_cdev.owner = THIS_MODULE;
	if (MAJOR(major_dev) == 0) {
		rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
		/* dynamic major device number registration required */
		if (rc < 0)
			return rc;
	} else {
		/* static major device number registration required */
		rc = register_chrdev_region(major_dev, 1, "visorchipset");
		if (rc < 0)
			return rc;
	}
	rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
	if (rc < 0) {
		unregister_chrdev_region(major_dev, 1);
		return rc;
	}
	return 0;
}
static void
visorchipset_file_cleanup(dev_t major_dev)
{
	if (file_cdev.ops)
		cdev_del(&file_cdev);
	file_cdev.ops = NULL;
	unregister_chrdev_region(major_dev, 1);
}
static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
	int allocbytes = sizeof(struct parser_context) + bytes;
	struct parser_context *ctx;

	if (retry)
		*retry = false;

	/*
	 * alloc an extra byte to ensure payload is
	 * '\0'-terminated
	 */
	allocbytes++;
	if ((controlvm_payload_bytes_buffered + bytes)
	    > MAX_CONTROLVM_PAYLOAD_BYTES) {
		if (retry)
			*retry = true;
		return NULL;
	}
	ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
	if (!ctx) {
		if (retry)
			*retry = true;
		return NULL;
	}

	ctx->allocbytes = allocbytes;
	ctx->param_bytes = bytes;
	ctx->curr = NULL;
	ctx->bytes_remaining = 0;
	ctx->byte_stream = false;
	if (local) {
		void *p;

		if (addr > virt_to_phys(high_memory - 1))
			goto err_finish_ctx;
		p = __va((unsigned long)(addr));
		memcpy(ctx->data, p, bytes);
	} else {
		void *mapping = memremap(addr, bytes, MEMREMAP_WB);

		if (!mapping)
			goto err_finish_ctx;
		memcpy(ctx->data, mapping, bytes);
		memunmap(mapping);
	}

	ctx->byte_stream = true;
	controlvm_payload_bytes_buffered += ctx->param_bytes;

	return ctx;

err_finish_ctx:
	parser_done(ctx);
	return NULL;
}
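/*
 * A NULL return with *retry set to true is the throttling signal: buffering
 * this payload now would push controlvm_payload_bytes_buffered past
 * MAX_CONTROLVM_PAYLOAD_BYTES (or the allocation failed), so the caller
 * stashes the message and reprocesses it on a later controlvm_periodic_work()
 * pass; see handle_command() below.
 */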
/**
 * handle_command() - process a controlvm message
 * @inmsg:        the message to process
 * @channel_addr: address of the controlvm channel
 *
 * Return:
 *	false - this function will return false only in the case where the
 *		controlvm message was NOT processed, but processing must be
 *		retried before reading the next controlvm message; a
 *		scenario where this can occur is when we need to throttle
 *		the allocation of memory in which to copy out controlvm
 *		payload data
 *	true  - processing of the controlvm message completed,
 *		either successfully or with an error
 */
static bool
handle_command(struct controlvm_message inmsg, u64 channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parm_addr;
	u32 parm_bytes;
	struct parser_context *parser_ctx = NULL;
	bool local_addr;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	local_addr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
		return true;
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;

	/*
	 * Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory. We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_addr && parm_bytes) {
		bool retry = false;

		parser_ctx =
		    parser_init_byte_stream(parm_addr, parm_bytes,
					    local_addr, &retry);
		if (!parser_ctx && retry)
			return false;
	}

	controlvm_init_response(&ackmsg, &inmsg.hdr,
				CONTROLVM_RESP_SUCCESS);
	if (controlvm_channel)
		visorchannel_signalinsert(controlvm_channel,
					  CONTROLVM_QUEUE_ACK,
					  &ackmsg);
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		bus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		bus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		bus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		my_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		if (cmd->device_change_state.flags.phys_device) {
			parahotplug_process_message(&inmsg);
		} else {
			/*
			 * save the hdr and cmd structures for later use
			 * when sending back the response to Command
			 */
			my_device_changestate(&inmsg);
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		my_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op for now, just send a respond that we passed */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
		break;
	case CONTROLVM_CHIPSET_READY:
		chipset_ready(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		chipset_selftest(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		chipset_notready(&inmsg.hdr);
		break;
	default:
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond
				(&inmsg.hdr,
				 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
		break;
	}

	if (parser_ctx) {
		parser_done(parser_ctx);
		parser_ctx = NULL;
	}
	return true;
}
/**
 * read_controlvm_event() - retrieves the next message from the
 *                          CONTROLVM_QUEUE_EVENT queue in the controlvm
 *                          channel
 * @msg: pointer to the retrieved message
 *
 * Return: true if a valid message was retrieved or false otherwise
 */
static bool
read_controlvm_event(struct controlvm_message *msg)
{
	if (!visorchannel_signalremove(controlvm_channel,
				       CONTROLVM_QUEUE_EVENT, msg)) {
		/* got a message */
		if (msg->hdr.flags.test_message == 1)
			return false;
		return true;
	}
	return false;
}
/**
 * parahotplug_process_list() - remove any request from the list that's been on
 *                              there too long and respond with an error
 */
static void
parahotplug_process_list(void)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);

		if (!time_after_eq(jiffies, req->expiration))
			continue;

		list_del(pos);
		if (req->msg.hdr.flags.response_expected)
			controlvm_respond_physdev_changestate(
				&req->msg.hdr,
				CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
				req->msg.cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	}

	spin_unlock(&parahotplug_request_list_lock);
}
static void
controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	bool got_command = false;
	bool handle_command_failed = false;

	while (!visorchannel_signalremove(controlvm_channel,
					  CONTROLVM_QUEUE_RESPONSE,
					  &inmsg))
		;
	if (!got_command) {
		if (controlvm_pending_msg_valid) {
			/*
			 * we throttled processing of a prior
			 * msg, so try to process it again
			 * rather than reading a new one
			 */
			inmsg = controlvm_pending_msg;
			controlvm_pending_msg_valid = false;
			got_command = true;
		} else {
			got_command = read_controlvm_event(&inmsg);
		}
	}

	handle_command_failed = false;
	while (got_command && (!handle_command_failed)) {
		most_recent_message_jiffies = jiffies;
		if (handle_command(inmsg,
				   visorchannel_get_physaddr
				   (controlvm_channel)))
			got_command = read_controlvm_event(&inmsg);
		else {
			/*
			 * this is a scenario where throttling
			 * is required, but probably NOT an
			 * error...; we stash the current
			 * controlvm msg so we will attempt to
			 * reprocess it on our next loop
			 */
			handle_command_failed = true;
			controlvm_pending_msg = inmsg;
			controlvm_pending_msg_valid = true;
		}
	}

	/* parahotplug_worker */
	parahotplug_process_list();

	if (time_after(jiffies,
		       most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
		/*
		 * it's been longer than MIN_IDLE_SECONDS since we
		 * processed our last controlvm message; slow down the
		 * polling
		 */
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	} else {
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	}

	schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
}
static int
visorchipset_init(struct acpi_device *acpi_device)
{
	int err = -ENODEV;
	u64 addr;
	uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;

	addr = controlvm_get_channel_address();
	if (!addr)
		goto error;

	controlvm_channel = visorchannel_create_with_lock(addr, 0,
							  GFP_KERNEL, uuid);
	if (!controlvm_channel)
		goto error;

	if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
			visorchannel_get_header(controlvm_channel))) {
		initialize_controlvm_payload();
	} else {
		goto error_destroy_channel;
	}

	major_dev = MKDEV(visorchipset_major, 0);
	err = visorchipset_file_init(major_dev, &controlvm_channel);
	if (err < 0)
		goto error_destroy_payload;

	/* if booting in a crash kernel */
	if (is_kdump_kernel())
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  setup_crash_devices_work_queue);
	else
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  controlvm_periodic_work);

	most_recent_message_jiffies = jiffies;
	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);

	visorchipset_platform_device.dev.devt = major_dev;
	if (platform_device_register(&visorchipset_platform_device) < 0) {
		POSTCODE_LINUX(DEVICE_REGISTER_FAILURE_PC, 0, 0,
			       POSTCODE_SEVERITY_ERR);
		err = -ENODEV;
		goto error_cancel_work;
	}
	POSTCODE_LINUX(CHIPSET_INIT_SUCCESS_PC, 0, 0, POSTCODE_SEVERITY_INFO);

	err = visorbus_init();
	if (err < 0)
		goto error_unregister;

	return 0;

error_unregister:
	platform_device_unregister(&visorchipset_platform_device);

error_cancel_work:
	cancel_delayed_work_sync(&periodic_controlvm_work);
	visorchipset_file_cleanup(major_dev);

error_destroy_payload:
	destroy_controlvm_payload_info(&controlvm_payload_info);

error_destroy_channel:
	visorchannel_destroy(controlvm_channel);

error:
	POSTCODE_LINUX(CHIPSET_INIT_FAILURE_PC, 0, err, POSTCODE_SEVERITY_ERR);
	return err;
}
static int
visorchipset_exit(struct acpi_device *acpi_device)
{
	POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, POSTCODE_SEVERITY_INFO);

	visorbus_exit();

	cancel_delayed_work_sync(&periodic_controlvm_work);
	destroy_controlvm_payload_info(&controlvm_payload_info);

	visorchannel_destroy(controlvm_channel);

	visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
	platform_device_unregister(&visorchipset_platform_device);
	POSTCODE_LINUX(DRIVER_EXIT_PC, 0, 0, POSTCODE_SEVERITY_INFO);

	return 0;
}
static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};

static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
	},
};

MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
static __init uint32_t visorutil_spar_detect(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		/* check the ID */
		cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
		return (ebx == UNISYS_SPAR_ID_EBX) &&
			(ecx == UNISYS_SPAR_ID_ECX) &&
			(edx == UNISYS_SPAR_ID_EDX);
	}
	return 0;
}
static int init_unisys(void)
{
	int result;

	if (!visorutil_spar_detect())
		return -ENODEV;

	result = acpi_bus_register_driver(&unisys_acpi_driver);
	if (result)
		return -ENODEV;

	pr_info("Unisys Visorchipset Driver Loaded.\n");
	return 0;
}

static void exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}
module_param_named(major, visorchipset_major, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_major,
		 "major device number to use for the device node");

module_init(init_unisys);
module_exit(exit_unisys);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");