/*
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "libc.h"
#include "s390-ccw.h"
#include "virtio.h"
static struct vring block;

/* Ring memory for the boot block device; VIRTIO_RING_SIZE is assumed to be
 * provided by virtio.h. */
static char ring_area[VIRTIO_RING_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

static char chsc_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
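
/*
 * KVM on s390 intercepts DIAGNOSE 0x500 as its hypercall instruction: the
 * call number travels in general register 1, the parameters in registers 2
 * and 3, and the return value comes back in register 2, which is why the
 * register variables below are pinned to r1-r3.
 */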
static long kvm_hypercall(unsigned long nr, unsigned long param1,
                          unsigned long param2)
{
    register ulong r_nr asm("1") = nr;
    register ulong r_param1 asm("2") = param1;
    register ulong r_param2 asm("3") = param2;
    register long retval asm("2");

    asm volatile ("diag 2,4,0x500"
                  : "=d" (retval)
                  : "d" (r_nr), "0" (r_param1), "r"(r_param2)
                  : "memory", "cc");

    return retval;
}
static void virtio_notify(struct subchannel_id schid)
{
    kvm_hypercall(KVM_S390_VIRTIO_CCW_NOTIFY, *(u32 *)&schid, 0);
}
/***********************************************
 *             Virtio functions                *
 ***********************************************/
static int drain_irqs(struct subchannel_id schid)
{
    struct irb irb = {};
    int r = 0;

    /* FIXME: make use of TPI, for that enable subchannel and isc */
    if (tsch(schid, &irb)) {
        /* Might want to differentiate error codes later on. */
        r = -1;
    } else if (irb.scsw.dstat != 0xc) {
        /* Expect channel end + device end; everything else is an error. */
        r = -1;
    }

    return r;
}
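
/*
 * Drive a single channel command: build a format-1 CCW for the requested
 * command code, point the ORB's channel-program address at it, issue SSCH
 * on the subchannel and then pick up the resulting status via drain_irqs().
 * Used below e.g. as run_ccw(schid, CCW_CMD_READ_CONF, &blk_cfg,
 * sizeof(blk_cfg)) to fetch the device configuration.
 */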
static int run_ccw(struct subchannel_id schid, int cmd, void *ptr, int len)
{
    struct ccw1 ccw = {};
    struct cmd_orb orb = {};
    struct schib schib;
    int r;

    /* start command processing */
    stsch_err(schid, &schib);
    schib.scsw.ctrl = SCSW_FCTL_START_FUNC;
    msch(schid, &schib);

    /* start subchannel command */
    orb.fmt = 1;
    orb.cpa = (u32)(long)&ccw;
    orb.lpm = 0x80;
    ccw.cmd_code = cmd;
    ccw.cda = (long)ptr;
    ccw.count = len;

    r = ssch(schid, &orb);
    /*
     * XXX Wait until device is done processing the CCW. For now we can
     *     assume that a simple tsch will have finished the CCW processing,
     *     but the architecture allows for asynchronous operation
     */
    if (!r) {
        r = drain_irqs(schid);
    }

    return r;
}
static void virtio_set_status(struct subchannel_id schid,
                              unsigned long dev_addr)
{
    unsigned char status = dev_addr;
    if (run_ccw(schid, CCW_CMD_WRITE_STATUS, &status, sizeof(status))) {
        virtio_panic("Could not write status to host!\n");
    }
}
static void virtio_reset(struct subchannel_id schid)
{
    run_ccw(schid, CCW_CMD_VDEV_RESET, NULL, 0);
}
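
/*
 * A vring lives in one contiguous buffer: the descriptor table first, the
 * available ring directly behind it, and the used ring starting at the next
 * align-boundary after the last available-ring entry.  vring_init() below
 * only computes these pointers; the backing memory (ring_area) is supplied
 * by the caller.
 */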
static void vring_init(struct vring *vr, unsigned int num, void *p,
                       unsigned long align)
{
    debug_print_addr("init p", p);
    vr->num = num;
    vr->desc = p;
    vr->avail = p + num*sizeof(struct vring_desc);
    vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + align-1)
                & ~(align - 1));

    /* Zero out all relevant fields */
    vr->avail->flags = 0;
    vr->avail->idx = 0;

    /* We're running with interrupts off anyways, so don't bother */
    vr->used->flags = VRING_USED_F_NO_NOTIFY;
    vr->used->idx = 0;
    vr->used_idx = 0;
    vr->next_idx = 0;

    debug_print_addr("init vr", vr);
}
static void vring_notify(struct subchannel_id schid)
{
    virtio_notify(schid);
}
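
/*
 * Queue one buffer as a descriptor.  The private VRING_HIDDEN_IS_CHAIN flag
 * marks follow-up entries of a chain: only the first descriptor of a chain
 * is published in the available ring, and the flag is masked out before the
 * descriptor flags are written.
 */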
static void vring_send_buf(struct vring *vr, void *p, int len, int flags)
{
    /* For follow-up chains we need to keep the first entry point */
    if (!(flags & VRING_HIDDEN_IS_CHAIN)) {
        vr->avail->ring[vr->avail->idx % vr->num] = vr->next_idx;
    }

    vr->desc[vr->next_idx].addr = (ulong)p;
    vr->desc[vr->next_idx].len = len;
    vr->desc[vr->next_idx].flags = flags & ~VRING_HIDDEN_IS_CHAIN;
    /* A chained request always continues in the following descriptor */
    vr->desc[vr->next_idx].next = vr->next_idx;
    vr->desc[vr->next_idx].next++;
    vr->next_idx++;

    /* Chains only have a single ID */
    if (!(flags & VRING_DESC_F_NEXT)) {
        vr->avail->idx++;
    }
}
static u64 get_clock(void)
{
    u64 r;

    asm volatile("stck %0" : "=Q" (r) : : "cc");
    return r;
}
static ulong get_second(void)
{
    return (get_clock() >> 12) / 1000000;
}
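
/*
 * Note on the conversion above: the TOD clock value stored by STCK ticks in
 * units of 2^-12 microseconds, so shifting right by 12 yields microseconds
 * and dividing by 10^6 yields seconds; coarse, but sufficient for the
 * timeout handling in vring_wait_reply() below.
 */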
/*
 * Wait for the host to reply.
 *
 * timeout is in seconds if > 0.
 *
 * Returns 0 on success, 1 on timeout.
 */
static int vring_wait_reply(struct vring *vr, int timeout)
{
    ulong target_second = get_second() + timeout;
    struct subchannel_id schid = vr->schid;
    int r = 0;

    /* Wait until the used index has moved. */
    while (vr->used->idx == vr->used_idx) {
        vring_notify(schid);
        if (timeout && (get_second() >= target_second)) {
            r = 1;
            break;
        }
        yield();
    }

    vr->used_idx = vr->used->idx;
    vr->next_idx = 0;
    vr->desc[0].flags = 0;

    return r;
}
/***********************************************
 *               Virtio block                  *
 ***********************************************/
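
/*
 * A block read is submitted as the usual three-part virtio-blk request:
 * a driver-written request header, the data buffer the device fills in,
 * and a single device-written status byte.  The three vring_send_buf()
 * calls below build exactly that descriptor chain.
 */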
int virtio_read_many(ulong sector, void *load_addr, int sec_num)
{
    struct virtio_blk_outhdr out_hdr;
    u8 status;
    int r;

    /* Tell the host we want to read */
    out_hdr.type = VIRTIO_BLK_T_IN;
    out_hdr.ioprio = 99;
    out_hdr.sector = virtio_sector_adjust(sector);

    vring_send_buf(&block, &out_hdr, sizeof(out_hdr), VRING_DESC_F_NEXT);

    /* This is where we want to receive data */
    vring_send_buf(&block, load_addr, virtio_get_block_size() * sec_num,
                   VRING_DESC_F_WRITE | VRING_HIDDEN_IS_CHAIN |
                   VRING_DESC_F_NEXT);

    /* The device writes its completion status into this byte */
    vring_send_buf(&block, &status, sizeof(u8), VRING_DESC_F_WRITE |
                   VRING_HIDDEN_IS_CHAIN);

    /* Now we can tell the host to read */
    vring_wait_reply(&block, 0);

    r = drain_irqs(block.schid);
    if (r) {
        /* Well, whatever status is supposed to contain... */
        status = 1;
    }

    return status;
}
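
/*
 * The caller hands over a packed record descriptor: judging from the
 * decoding below, bits 48-63 of rec_list2 hold the sector length, bits
 * 32-47 hold the sector count minus one, and rec_list1 appears to supply
 * the starting sector.
 */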
unsigned long virtio_load_direct(ulong rec_list1, ulong rec_list2,
                                 ulong subchan_id, void *load_addr)
{
    u8 status;
    ulong sec = rec_list1;
    int sec_num = ((rec_list2 >> 32) & 0xffff) + 1;
    int sec_len = rec_list2 >> 48;
    ulong addr = (ulong)load_addr;

    if (sec_len != virtio_get_block_size()) {
        return -1;
    }

    sclp_print(".");
    status = virtio_read_many(sec, (void *)addr, sec_num);
    if (status) {
        virtio_panic("I/O Error");
    }
    addr += sec_num * virtio_get_block_size();

    return addr;
}
int virtio_read(ulong sector, void *load_addr)
{
    return virtio_read_many(sector, load_addr, 1);
}
static VirtioBlkConfig blk_cfg = {};
static bool guessed_disk_nature;
bool virtio_guessed_disk_nature(void)
{
    return guessed_disk_nature;
}
void virtio_assume_scsi(void)
{
    guessed_disk_nature = true;
    blk_cfg.blk_size = 512;
    blk_cfg.physical_block_exp = 0;
}
void virtio_assume_iso9660(void)
{
    guessed_disk_nature = true;
    blk_cfg.blk_size = 2048;
    blk_cfg.physical_block_exp = 0;
}
void virtio_assume_eckd(void)
{
    guessed_disk_nature = true;
    blk_cfg.blk_size = 4096;
    blk_cfg.physical_block_exp = 0;

    /* this must be here to calculate code segment position */
    blk_cfg.geometry.heads = 15;
    blk_cfg.geometry.sectors = 12;
}
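
/*
 * Disk-type heuristics: if the geometry had to be guessed, only the block
 * size is meaningful, so the checks below fall back to it.  Otherwise a
 * SCSI-like disk is recognised by the conventional 255-head / 63-sector
 * fake geometry with 512-byte blocks, and an ECKD DASD by 15 heads plus a
 * block-size-dependent sectors-per-track value.
 */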
bool virtio_disk_is_scsi(void)
{
    if (guessed_disk_nature) {
        return (virtio_get_block_size() == 512);
    }
    return (blk_cfg.geometry.heads == 255)
        && (blk_cfg.geometry.sectors == 63)
        && (virtio_get_block_size() == 512);
}
/*
 * Other supported value pairs, if any, would need to be added here.
 * Note: head count is always 15.
 */
static inline u8 virtio_eckd_sectors_for_block_size(int size)
{
    /*
     * Sectors per track on a 3390-style volume for each block size; only
     * the 4096 -> 12 pair is corroborated by virtio_assume_eckd() above,
     * the remaining values are the usual ECKD figures and are assumed here.
     */
    switch (size) {
    case 512:
        return 49;
    case 1024:
        return 33;
    case 2048:
        return 21;
    case 4096:
        return 12;
    }
    return 0;
}
bool virtio_disk_is_eckd(void)
{
    const int block_size = virtio_get_block_size();

    if (guessed_disk_nature) {
        return (block_size == 4096);
    }
    return (blk_cfg.geometry.heads == 15)
        && (blk_cfg.geometry.sectors ==
            virtio_eckd_sectors_for_block_size(block_size));
}
bool virtio_ipl_disk_is_valid(void)
{
    return virtio_disk_is_scsi() || virtio_disk_is_eckd();
}
int virtio_get_block_size(void)
{
    return blk_cfg.blk_size << blk_cfg.physical_block_exp;
}
uint8_t virtio_get_heads(void)
{
    return blk_cfg.geometry.heads;
}
uint8_t virtio_get_sectors(void)
{
    return blk_cfg.geometry.sectors;
}
uint64_t virtio_get_blocks(void)
{
    return blk_cfg.capacity /
           (virtio_get_block_size() / VIRTIO_SECTOR_SIZE);
}
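
/*
 * Bring-up sequence for the boot disk: reset the device, read the virtqueue
 * and device configuration via channel commands, lay the vring out in
 * ring_area, register the queue with CCW_CMD_SET_VQ and finally announce
 * DRIVER_OK.  If the reported geometry does not look like a bootable disk,
 * fall back to assuming a 512-byte-sector SCSI layout.
 */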
void virtio_setup_block(struct subchannel_id schid)
{
    struct vq_info_block info;
    struct vq_config_block config = {};

    blk_cfg.blk_size = 0; /* mark "illegal" - setup started... */
    guessed_disk_nature = false;

    virtio_reset(schid);

    /*
     * Skipping CCW_CMD_READ_FEAT. We're not doing anything fancy, and
     * we'll just stop dead anyway if anything does not work like we
     * expect it.
     */

    config.index = 0;
    if (run_ccw(schid, CCW_CMD_READ_VQ_CONF, &config, sizeof(config))) {
        virtio_panic("Could not get block device VQ configuration\n");
    }
    if (run_ccw(schid, CCW_CMD_READ_CONF, &blk_cfg, sizeof(blk_cfg))) {
        virtio_panic("Could not get block device configuration\n");
    }
    vring_init(&block, config.num, ring_area,
               KVM_S390_VIRTIO_RING_ALIGN);

    info.queue = (unsigned long long) ring_area;
    info.align = KVM_S390_VIRTIO_RING_ALIGN;
    info.index = 0;
    info.num = config.num;
    block.schid = schid;

    if (!run_ccw(schid, CCW_CMD_SET_VQ, &info, sizeof(info))) {
        virtio_set_status(schid, VIRTIO_CONFIG_S_DRIVER_OK);
    }

    if (!virtio_ipl_disk_is_valid()) {
        /* make sure all getters but blocksize return 0 for invalid IPL disk */
        memset(&blk_cfg, 0, sizeof(blk_cfg));
        virtio_assume_scsi();
    }
}
bool virtio_is_blk(struct subchannel_id schid)
{
    int r;
    struct senseid senseid = {};

    /* run sense id command */
    r = run_ccw(schid, CCW_CMD_SENSE_ID, &senseid, sizeof(senseid));
    if (r) {
        return false;
    }
    if ((senseid.cu_type != 0x3832) || (senseid.cu_model != VIRTIO_ID_BLOCK)) {
        return false;
    }

    return true;
}
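
/*
 * Enable the multiple-subchannel-sets facility via CHSC: the chsc_area_sda
 * request (command code 0x0031) with operation code 2 selects MSS, and a
 * response code of 0x0001 indicates success.  The request block is built in
 * the page-aligned chsc_page buffer declared at the top of this file.
 */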
int enable_mss_facility(void)
{
    int ret;
    struct chsc_area_sda *sda_area = (struct chsc_area_sda *) chsc_page;

    memset(sda_area, 0, PAGE_SIZE);
    sda_area->request.length = 0x0400;
    sda_area->request.code = 0x0031;
    sda_area->operation_code = 0x2;

    ret = chsc(sda_area);
    if ((ret == 0) && (sda_area->response.code == 0x0001)) {
        return 0;
    }
    return -1;
}