s390x/pci: enforce zPCI state checking
[qemu.git] / hw/s390x/s390-pci-inst.c
/*
 * s390 PCI instructions
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "s390-pci-inst.h"
#include "s390-pci-bus.h"
#include "exec/memory-internal.h"
#include "qemu/error-report.h"

/* #define DEBUG_S390PCI_INST */
#ifdef DEBUG_S390PCI_INST
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
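
/* Store an 8-bit zPCI status code in bits 24-31 of the guest register
 * that the instruction handlers below report status in. */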
static void s390_set_status_code(CPUS390XState *env,
                                 uint8_t r, uint64_t status_code)
{
    env->regs[r] &= ~0xff000000ULL;
    env->regs[r] |= (status_code & 0xff) << 24;
}
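
/* CLP List PCI: fill the response buffer with one ClpFhListEntry per known
 * zPCI function, starting at resume_token, and report how far we got. */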
static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
{
    S390PCIBusDevice *pbdev;
    uint32_t res_code, initial_l2, g_l2, finish;
    int rc, idx;
    uint64_t resume_token;

    rc = 0;
    if (lduw_p(&rrb->request.hdr.len) != 32) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
        res_code = CLP_RC_FMT;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
        ldq_p(&rrb->request.reserved1) != 0 ||
        ldq_p(&rrb->request.reserved2) != 0) {
        res_code = CLP_RC_RESNOT0;
        rc = -EINVAL;
        goto out;
    }

    resume_token = ldq_p(&rrb->request.resume_token);

    if (resume_token) {
        pbdev = s390_pci_find_dev_by_idx(resume_token);
        if (!pbdev) {
            res_code = CLP_RC_LISTPCI_BADRT;
            rc = -EINVAL;
            goto out;
        }
    }

    if (lduw_p(&rrb->response.hdr.len) < 48) {
        res_code = CLP_RC_8K;
        rc = -EINVAL;
        goto out;
    }

    initial_l2 = lduw_p(&rrb->response.hdr.len);
    if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry)
        != 0) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        *cc = 3;
        goto out;
    }

    stl_p(&rrb->response.fmt, 0);
    stq_p(&rrb->response.reserved1, 0);
    stq_p(&rrb->response.reserved2, 0);
    stl_p(&rrb->response.mdd, FH_MASK_SHM);
    stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
    rrb->response.entry_size = sizeof(ClpFhListEntry);
    finish = 0;
    idx = resume_token;
    g_l2 = LIST_PCI_HDR_LEN;
    do {
        pbdev = s390_pci_find_dev_by_idx(idx);
        if (!pbdev) {
            finish = 1;
            break;
        }
        stw_p(&rrb->response.fh_list[idx - resume_token].device_id,
            pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
        stw_p(&rrb->response.fh_list[idx - resume_token].vendor_id,
            pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
        /* Ignore RESERVED devices. */
        stl_p(&rrb->response.fh_list[idx - resume_token].config,
            pbdev->state == ZPCI_FS_STANDBY ? 0 : 1 << 31);
        stl_p(&rrb->response.fh_list[idx - resume_token].fid, pbdev->fid);
        stl_p(&rrb->response.fh_list[idx - resume_token].fh, pbdev->fh);

        g_l2 += sizeof(ClpFhListEntry);
        /* Add endian check for DPRINTF? */
        DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n",
            g_l2,
            lduw_p(&rrb->response.fh_list[idx - resume_token].vendor_id),
            lduw_p(&rrb->response.fh_list[idx - resume_token].device_id),
            ldl_p(&rrb->response.fh_list[idx - resume_token].fid),
            ldl_p(&rrb->response.fh_list[idx - resume_token].fh));
        idx++;
    } while (g_l2 < initial_l2);

    if (finish == 1) {
        resume_token = 0;
    } else {
        resume_token = idx;
    }
    stq_p(&rrb->response.resume_token, resume_token);
    stw_p(&rrb->response.hdr.len, g_l2);
    stw_p(&rrb->response.hdr.rsp, CLP_RC_OK);
out:
    if (rc) {
        DPRINTF("list pci failed rc 0x%x\n", rc);
        stw_p(&rrb->response.hdr.rsp, res_code);
    }
    return rc;
}
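
/* Handler for the CLP instruction: read the request/response block from
 * guest memory, dispatch on the CLP command (list/set/query function or
 * function group) and write the response block back. */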
int clp_service_call(S390CPU *cpu, uint8_t r2)
{
    ClpReqHdr *reqh;
    ClpRspHdr *resh;
    S390PCIBusDevice *pbdev;
    uint32_t req_len;
    uint32_t res_len;
    uint8_t buffer[4096 * 2];
    uint8_t cc = 0;
    CPUS390XState *env = &cpu->env;
    int i;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) {
        return 0;
    }
    reqh = (ClpReqHdr *)buffer;
    req_len = lduw_p(&reqh->len);
    if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + sizeof(*resh))) {
        return 0;
    }
    resh = (ClpRspHdr *)(buffer + req_len);
    res_len = lduw_p(&resh->len);
    if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }
    if ((req_len + res_len) > 8192) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + res_len)) {
        return 0;
    }

    if (req_len != 32) {
        stw_p(&resh->rsp, CLP_RC_LEN);
        goto out;
    }

    switch (lduw_p(&reqh->cmd)) {
    case CLP_LIST_PCI: {
        ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
        list_pci(rrb, &cc);
        break;
    }
    case CLP_SET_PCI_FN: {
        ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
        ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqsetpci->fh));
        if (!pbdev) {
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        switch (reqsetpci->oc) {
        case CLP_SET_ENABLE_PCI_FN:
            pbdev->fh |= FH_MASK_ENABLE;
            pbdev->state = ZPCI_FS_ENABLED;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        case CLP_SET_DISABLE_PCI_FN:
            pbdev->fh &= ~FH_MASK_ENABLE;
            pbdev->state = ZPCI_FS_DISABLED;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        default:
            DPRINTF("unknown set pci command\n");
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
            break;
        }
        break;
    }
    case CLP_QUERY_PCI_FN: {
        ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
        ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqquery->fh));
        if (!pbdev) {
            DPRINTF("query pci no pci dev\n");
            stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        for (i = 0; i < PCI_BAR_COUNT; i++) {
            uint32_t data = pci_get_long(pbdev->pdev->config +
                PCI_BASE_ADDRESS_0 + (i * 4));

            stl_p(&resquery->bar[i], data);
            resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
                                    ctz64(pbdev->pdev->io_regions[i].size) : 0;
            DPRINTF("bar %d addr 0x%x size 0x%" PRIx64 " barsize 0x%x\n", i,
                    ldl_p(&resquery->bar[i]),
                    pbdev->pdev->io_regions[i].size,
                    resquery->bar_size[i]);
        }

        stq_p(&resquery->sdma, ZPCI_SDMA_ADDR);
        stq_p(&resquery->edma, ZPCI_EDMA_ADDR);
        stl_p(&resquery->fid, pbdev->fid);
        stw_p(&resquery->pchid, 0);
        stw_p(&resquery->ug, 1);
        stl_p(&resquery->uid, pbdev->fid);
        stw_p(&resquery->hdr.rsp, CLP_RC_OK);
        break;
    }
    case CLP_QUERY_PCI_FNGRP: {
        ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;
        resgrp->fr = 1;
        stq_p(&resgrp->dasm, 0);
        stq_p(&resgrp->msia, ZPCI_MSI_ADDR);
        stw_p(&resgrp->mui, 0);
        stw_p(&resgrp->i, 128);
        resgrp->version = 0;

        stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
        break;
    }
    default:
        DPRINTF("unknown clp command\n");
        stw_p(&resh->rsp, CLP_RC_CMD);
        break;
    }

out:
    if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer,
                                req_len + res_len)) {
        return 0;
    }
    setcc(cpu, cc);
    return 0;
}
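
/* PCILG (PCI Load): read up to 8 bytes from one of the function's BAR
 * spaces (pcias 0-5) or its configuration space (pcias 15) into reg r1. */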
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    uint64_t offset;
    uint64_t data;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("pcilg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    if (pcias < 6) {
        if ((8 - (offset & 0x7)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        MemoryRegion *mr = pbdev->pdev->io_regions[pcias].memory;
        memory_region_dispatch_read(mr, offset, &data, len,
                                    MEMTXATTRS_UNSPECIFIED);
    } else if (pcias == 15) {
        if ((4 - (offset & 0x3)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        data = pci_host_config_read_common(
                   pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);

        switch (len) {
        case 1:
            break;
        case 2:
            data = bswap16(data);
            break;
        case 4:
            data = bswap32(data);
            break;
        case 8:
            data = bswap64(data);
            break;
        default:
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
    } else {
        DPRINTF("invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    env->regs[r1] = data;
    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}
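
/* For a guest write to the message data field of an MSI-X table entry,
 * OR the function id (shifted by ZPCI_MSI_VEC_BITS) into the data so the
 * originating function can be identified on interrupt delivery. */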
static void update_msix_table_msg_data(S390PCIBusDevice *pbdev, uint64_t offset,
                                       uint64_t *data, uint8_t len)
{
    uint32_t val;
    uint8_t *msg_data;

    if (offset % PCI_MSIX_ENTRY_SIZE != 8) {
        return;
    }

    if (len != 4) {
        DPRINTF("access msix table msg data but len is %d\n", len);
        return;
    }

    msg_data = (uint8_t *)data - offset % PCI_MSIX_ENTRY_SIZE +
               PCI_MSIX_ENTRY_VECTOR_CTRL;
    val = pci_get_long(msg_data) | (pbdev->fid << ZPCI_MSI_VEC_BITS);
    pci_set_long(msg_data, val);
    DPRINTF("update msix msg_data to 0x%" PRIx64 "\n", *data);
}
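
/* Return 1 if the store hits the function's MSI-X table (matching BAR and
 * an offset within the table), 0 otherwise. */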
static int trap_msix(S390PCIBusDevice *pbdev, uint64_t offset, uint8_t pcias)
{
    if (pbdev->msix.available && pbdev->msix.table_bar == pcias &&
        offset >= pbdev->msix.table_offset &&
        offset <= pbdev->msix.table_offset +
                  (pbdev->msix.entries - 1) * PCI_MSIX_ENTRY_SIZE) {
        return 1;
    } else {
        return 0;
    }
}
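
/* PCISTG (PCI Store): write up to 8 bytes from reg r1 to one of the
 * function's BAR spaces (pcias 0-5) or its configuration space (pcias 15);
 * stores that hit the MSI-X table are redirected via trap_msix(). */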
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    uint64_t offset, data;
    S390PCIBusDevice *pbdev;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("pcistg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    data = env->regs[r1];
    if (pcias < 6) {
        if ((8 - (offset & 0x7)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        MemoryRegion *mr;
        if (trap_msix(pbdev, offset, pcias)) {
            offset = offset - pbdev->msix.table_offset;
            mr = &pbdev->pdev->msix_table_mmio;
            update_msix_table_msg_data(pbdev, offset, &data, len);
        } else {
            mr = pbdev->pdev->io_regions[pcias].memory;
        }

        memory_region_dispatch_write(mr, offset, data, len,
                                     MEMTXATTRS_UNSPECIFIED);
    } else if (pcias == 15) {
        if ((4 - (offset & 0x3)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        switch (len) {
        case 1:
            break;
        case 2:
            data = bswap16(data);
            break;
        case 4:
            data = bswap32(data);
            break;
        case 8:
            data = bswap64(data);
            break;
        default:
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }

        pci_host_config_write_common(pbdev->pdev, offset,
                                     pci_config_size(pbdev->pdev),
                                     data, len);
    } else {
        DPRINTF("pcistg invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}
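
/* RPCIT (Refresh PCI Translations): walk the guest DMA range given in
 * regs[r2]/regs[r2+1], re-translate it through the function's IOMMU region
 * and notify the IOMMU for every translated entry. */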
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    uint32_t fh;
    S390PCIBusDevice *pbdev;
    hwaddr start, end;
    IOMMUTLBEntry entry;
    MemoryRegion *mr;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        goto out;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        goto out;
    }

    fh = env->regs[r1] >> 32;
    start = env->regs[r2];
    end = start + env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("rpcit no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        goto out;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_MOD_ST_ERROR_RECOVER);
        return 0;
    default:
        break;
    }

    if (!pbdev->g_iota) {
        pbdev->state = ZPCI_FS_ERROR;
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INSUF_RES);
        s390_pci_generate_error_event(ERR_EVENT_INVALAS, pbdev->fh, pbdev->fid,
                                      start, 0);
        goto out;
    }

    if (end < pbdev->pba || start > pbdev->pal) {
        pbdev->state = ZPCI_FS_ERROR;
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INSUF_RES);
        s390_pci_generate_error_event(ERR_EVENT_OORANGE, pbdev->fh, pbdev->fid,
                                      start, 0);
        goto out;
    }

    mr = &pbdev->iommu_mr;
    while (start < end) {
        entry = mr->iommu_ops->translate(mr, start, 0);

        if (!entry.translated_addr) {
            pbdev->state = ZPCI_FS_ERROR;
            setcc(cpu, ZPCI_PCI_LS_ERR);
            s390_set_status_code(env, r1, ZPCI_PCI_ST_INSUF_RES);
            s390_pci_generate_error_event(ERR_EVENT_SERR, pbdev->fh, pbdev->fid,
                                          start, ERR_EVENT_Q_BIT);
            goto out;
        }

        memory_region_notify_iommu(mr, entry);
        start += entry.addr_mask + 1;
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
out:
    return 0;
}
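
/* PCISTB (PCI Store Block): copy a 16/32/64/128-byte block read from guest
 * memory at gaddr into the BAR space selected by regs[r1], at offset
 * regs[r3]. */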
int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
                        uint8_t ar)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    MemoryRegion *mr;
    int i;
    uint32_t fh;
    uint8_t pcias;
    uint8_t len;
    uint8_t buffer[128];

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    pcias = (env->regs[r1] >> 16) & 0xf;
    len = env->regs[r1] & 0xff;

    if (pcias > 5) {
        DPRINTF("pcistb invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    switch (len) {
    case 16:
    case 32:
    case 64:
    case 128:
        break;
    default:
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    mr = pbdev->pdev->io_regions[pcias].memory;
    if (!memory_region_access_valid(mr, env->regs[r3], len, true)) {
        program_interrupt(env, PGM_ADDRESSING, 6);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
        return 0;
    }

    for (i = 0; i < len / 8; i++) {
        memory_region_dispatch_write(mr, env->regs[r3] + i * 8,
                                     ldq_p(buffer + i * 8), 8,
                                     MEMTXATTRS_UNSPECIFIED);
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}
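
/* Register adapter interrupts for the function: set up and map the summary
 * and per-vector indicators described by the FIB. */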
static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    int ret, len;

    ret = css_register_io_adapter(S390_PCIPT_ADAPTER,
                                  FIB_DATA_ISC(ldl_p(&fib.data)), true, false,
                                  &pbdev->routes.adapter.adapter_id);
    assert(ret == 0);

    pbdev->summary_ind = get_indicator(ldq_p(&fib.aisb), sizeof(uint64_t));
    len = BITS_TO_LONGS(FIB_DATA_NOI(ldl_p(&fib.data))) * sizeof(unsigned long);
    pbdev->indicator = get_indicator(ldq_p(&fib.aibv), len);

    ret = map_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    if (ret) {
        goto out;
    }

    ret = map_indicator(&pbdev->routes.adapter, pbdev->indicator);
    if (ret) {
        goto out;
    }

    pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb);
    pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data));
    pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv);
    pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data));
    pbdev->isc = FIB_DATA_ISC(ldl_p(&fib.data));
    pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data));
    pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data));

    DPRINTF("reg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
out:
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);
    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;
    return ret;
}

int pci_dereg_irqs(S390PCIBusDevice *pbdev)
{
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);

    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;
    pbdev->routes.adapter.summary_addr = 0;
    pbdev->routes.adapter.summary_offset = 0;
    pbdev->routes.adapter.ind_addr = 0;
    pbdev->routes.adapter.ind_offset = 0;
    pbdev->isc = 0;
    pbdev->noi = 0;
    pbdev->sum = 0;

    DPRINTF("dereg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
}
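
/* Register the I/O address translation parameters (PCI base/limit address
 * and guest IOTA) from the FIB and enable the function's IOMMU region. */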
static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    uint64_t pba = ldq_p(&fib.pba);
    uint64_t pal = ldq_p(&fib.pal);
    uint64_t g_iota = ldq_p(&fib.iota);
    uint8_t dt = (g_iota >> 2) & 0x7;
    uint8_t t = (g_iota >> 11) & 0x1;

    if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) {
        program_interrupt(env, PGM_OPERAND, 6);
        return -EINVAL;
    }

    /* currently we only support designation type 1 with translation */
    if (!(dt == ZPCI_IOTA_RTTO && t)) {
        error_report("unsupported ioat dt %d t %d", dt, t);
        program_interrupt(env, PGM_OPERAND, 6);
        return -EINVAL;
    }

    pbdev->pba = pba;
    pbdev->pal = pal;
    pbdev->g_iota = g_iota;

    s390_pci_iommu_enable(pbdev);

    return 0;
}

void pci_dereg_ioat(S390PCIBusDevice *pbdev)
{
    s390_pci_iommu_disable(pbdev);
    pbdev->pba = 0;
    pbdev->pal = 0;
    pbdev->g_iota = 0;
}
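
/* MPCIFC (Modify PCI Function Controls): dispatch on the operation control
 * to register/deregister interrupts or the IOAT, reset error/blocked state
 * or set the measurement block address. */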
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
{
    CPUS390XState *env = &cpu->env;
    uint8_t oc, dmaas;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    oc = env->regs[r1] & 0xff;
    dmaas = (env->regs[r1] >> 16) & 0xff;
    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("mpcifc no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    default:
        break;
    }

    if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        return 0;
    }

    if (fib.fmt != 0) {
        program_interrupt(env, PGM_OPERAND, 6);
        return 0;
    }

    switch (oc) {
    case ZPCI_MOD_FC_REG_INT:
        if (pbdev->summary_ind) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_irqs(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_RES_NOT_AVAIL);
        }
        break;
    case ZPCI_MOD_FC_DEREG_INT:
        if (!pbdev->summary_ind) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_irqs(pbdev);
        }
        break;
    case ZPCI_MOD_FC_REG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (pbdev->iommu_enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_ioat(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
        }
        break;
    case ZPCI_MOD_FC_DEREG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu_enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev);
        }
        break;
    case ZPCI_MOD_FC_REREG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu_enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev);
            if (reg_ioat(env, pbdev, fib)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
            }
        }
        break;
    case ZPCI_MOD_FC_RESET_ERROR:
        switch (pbdev->state) {
        case ZPCI_FS_BLOCKED:
        case ZPCI_FS_ERROR:
            pbdev->state = ZPCI_FS_ENABLED;
            break;
        default:
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        }
        break;
    case ZPCI_MOD_FC_RESET_BLOCK:
        switch (pbdev->state) {
        case ZPCI_FS_ERROR:
            pbdev->state = ZPCI_FS_BLOCKED;
            break;
        default:
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        }
        break;
    case ZPCI_MOD_FC_SET_MEASURE:
        pbdev->fmb_addr = ldq_p(&fib.fmb_addr);
        break;
    default:
        program_interrupt(&cpu->env, PGM_OPERAND, 6);
        cc = ZPCI_PCI_LS_ERR;
    }

    setcc(cpu, cc);
    return 0;
}
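
/* STPCIFC (Store PCI Function Controls): build a FIB describing the current
 * function state and store it at fiba in guest memory. */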
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint32_t data;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    memset(&fib, 0, sizeof(fib));

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_DISABLED:
        if (fh & FH_MASK_ENABLE) {
            setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
            return 0;
        }
        goto out;
    /* BLOCKED bit is set to one coincident with the setting of ERROR bit.
     * FH Enabled bit is set to one in states of ENABLED, BLOCKED or ERROR. */
    case ZPCI_FS_ERROR:
        fib.fc |= 0x20;
        /* fallthrough */
    case ZPCI_FS_BLOCKED:
        fib.fc |= 0x40;
        /* fallthrough */
    case ZPCI_FS_ENABLED:
        fib.fc |= 0x80;
        if (pbdev->iommu_enabled) {
            fib.fc |= 0x10;
        }
        if (!(fh & FH_MASK_ENABLE)) {
            env->regs[r1] |= 1ULL << 63;
        }
        break;
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_PERM_ERROR);
        return 0;
    }

    stq_p(&fib.pba, pbdev->pba);
    stq_p(&fib.pal, pbdev->pal);
    stq_p(&fib.iota, pbdev->g_iota);
    stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
    stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
    stq_p(&fib.fmb_addr, pbdev->fmb_addr);

    data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
           ((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
           ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
    stl_p(&fib.data, data);

out:
    if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        return 0;
    }

    setcc(cpu, cc);
    return 0;
}