hw/s390x/s390-pci-inst.c

/*
 * s390 PCI instructions
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "s390-pci-inst.h"
#include "s390-pci-bus.h"
#include <exec/memory-internal.h>
#include <qemu/error-report.h>

/* #define DEBUG_S390PCI_INST */
#ifdef DEBUG_S390PCI_INST
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

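/* Place a zPCI status code into the 0xff000000 byte of general register r. */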
static void s390_set_status_code(CPUS390XState *env,
                                 uint8_t r, uint64_t status_code)
{
    env->regs[r] &= ~0xff000000ULL;
    env->regs[r] |= (status_code & 0xff) << 24;
}

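/*
 * CLP List PCI: fill the response block with one entry per emulated zPCI
 * function, starting at the requested resume token and stopping when the
 * response buffer is full or no further device exists.
 */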
static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
{
    S390PCIBusDevice *pbdev;
    uint32_t res_code, initial_l2, g_l2, finish;
    int rc, idx;
    uint64_t resume_token;

    rc = 0;
    if (lduw_p(&rrb->request.hdr.len) != 32) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
        res_code = CLP_RC_FMT;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
        ldq_p(&rrb->request.reserved1) != 0 ||
        ldq_p(&rrb->request.reserved2) != 0) {
        res_code = CLP_RC_RESNOT0;
        rc = -EINVAL;
        goto out;
    }

    resume_token = ldq_p(&rrb->request.resume_token);

    if (resume_token) {
        pbdev = s390_pci_find_dev_by_idx(resume_token);
        if (!pbdev) {
            res_code = CLP_RC_LISTPCI_BADRT;
            rc = -EINVAL;
            goto out;
        }
    }

    if (lduw_p(&rrb->response.hdr.len) < 48) {
        res_code = CLP_RC_8K;
        rc = -EINVAL;
        goto out;
    }

    initial_l2 = lduw_p(&rrb->response.hdr.len);
    if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry)
        != 0) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        *cc = 3;
        goto out;
    }

    stl_p(&rrb->response.fmt, 0);
    stq_p(&rrb->response.reserved1, 0);
    stq_p(&rrb->response.reserved2, 0);
    stl_p(&rrb->response.mdd, FH_VIRT);
    stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
    rrb->response.entry_size = sizeof(ClpFhListEntry);
    finish = 0;
    idx = resume_token;
    g_l2 = LIST_PCI_HDR_LEN;
    do {
        pbdev = s390_pci_find_dev_by_idx(idx);
        if (!pbdev) {
            finish = 1;
            break;
        }
        stw_p(&rrb->response.fh_list[idx - resume_token].device_id,
              pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
        stw_p(&rrb->response.fh_list[idx - resume_token].vendor_id,
              pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
        stl_p(&rrb->response.fh_list[idx - resume_token].config, 0x80000000);
        stl_p(&rrb->response.fh_list[idx - resume_token].fid, pbdev->fid);
        stl_p(&rrb->response.fh_list[idx - resume_token].fh, pbdev->fh);

        g_l2 += sizeof(ClpFhListEntry);
        /* Add endian check for DPRINTF? */
        DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n",
                g_l2,
                lduw_p(&rrb->response.fh_list[idx - resume_token].vendor_id),
                lduw_p(&rrb->response.fh_list[idx - resume_token].device_id),
                ldl_p(&rrb->response.fh_list[idx - resume_token].fid),
                ldl_p(&rrb->response.fh_list[idx - resume_token].fh));
        idx++;
    } while (g_l2 < initial_l2);

    if (finish == 1) {
        resume_token = 0;
    } else {
        resume_token = idx;
    }
    stq_p(&rrb->response.resume_token, resume_token);
    stw_p(&rrb->response.hdr.len, g_l2);
    stw_p(&rrb->response.hdr.rsp, CLP_RC_OK);
out:
    if (rc) {
        DPRINTF("list pci failed rc 0x%x\n", rc);
        stw_p(&rrb->response.hdr.rsp, res_code);
    }
    return rc;
}

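/*
 * CLP (Call Logical Processor) service call: read the request/response
 * block from guest memory, dispatch on the CLP command (list, set, query
 * function, query function group) and write the response block back.
 */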
int clp_service_call(S390CPU *cpu, uint8_t r2)
{
    ClpReqHdr *reqh;
    ClpRspHdr *resh;
    S390PCIBusDevice *pbdev;
    uint32_t req_len;
    uint32_t res_len;
    uint8_t buffer[4096 * 2];
    uint8_t cc = 0;
    CPUS390XState *env = &cpu->env;
    int i;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) {
        return 0;
    }
    reqh = (ClpReqHdr *)buffer;
    req_len = lduw_p(&reqh->len);
    if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + sizeof(*resh))) {
        return 0;
    }
    resh = (ClpRspHdr *)(buffer + req_len);
    res_len = lduw_p(&resh->len);
    if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }
    if ((req_len + res_len) > 8192) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + res_len)) {
        return 0;
    }

    if (req_len != 32) {
        stw_p(&resh->rsp, CLP_RC_LEN);
        goto out;
    }

    switch (lduw_p(&reqh->cmd)) {
    case CLP_LIST_PCI: {
        ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
        list_pci(rrb, &cc);
        break;
    }
    case CLP_SET_PCI_FN: {
        ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
        ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqsetpci->fh));
        if (!pbdev) {
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        switch (reqsetpci->oc) {
        case CLP_SET_ENABLE_PCI_FN:
            pbdev->fh = pbdev->fh | 1 << ENABLE_BIT_OFFSET;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        case CLP_SET_DISABLE_PCI_FN:
            pbdev->fh = pbdev->fh & ~(1 << ENABLE_BIT_OFFSET);
            pbdev->error_state = false;
            pbdev->lgstg_blocked = false;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        default:
            DPRINTF("unknown set pci command\n");
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
            break;
        }
        break;
    }
    case CLP_QUERY_PCI_FN: {
        ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
        ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqquery->fh));
        if (!pbdev) {
            DPRINTF("query pci no pci dev\n");
            stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        for (i = 0; i < PCI_BAR_COUNT; i++) {
            uint32_t data = pci_get_long(pbdev->pdev->config +
                                         PCI_BASE_ADDRESS_0 + (i * 4));

            stl_p(&resquery->bar[i], data);
            resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
                                    ctz64(pbdev->pdev->io_regions[i].size) : 0;
            DPRINTF("bar %d addr 0x%x size 0x%" PRIx64 "barsize 0x%x\n", i,
                    ldl_p(&resquery->bar[i]),
                    pbdev->pdev->io_regions[i].size,
                    resquery->bar_size[i]);
        }

        stq_p(&resquery->sdma, ZPCI_SDMA_ADDR);
        stq_p(&resquery->edma, ZPCI_EDMA_ADDR);
        stw_p(&resquery->pchid, 0);
        stw_p(&resquery->ug, 1);
        stl_p(&resquery->uid, pbdev->fid);
        stw_p(&resquery->hdr.rsp, CLP_RC_OK);
        break;
    }
    case CLP_QUERY_PCI_FNGRP: {
        ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;
        resgrp->fr = 1;
        stq_p(&resgrp->dasm, 0);
        stq_p(&resgrp->msia, ZPCI_MSI_ADDR);
        stw_p(&resgrp->mui, 0);
        stw_p(&resgrp->i, 128);
        resgrp->version = 0;

        stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
        break;
    }
    default:
        DPRINTF("unknown clp command\n");
        stw_p(&resh->rsp, CLP_RC_CMD);
        break;
    }

out:
    if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer,
                                req_len + res_len)) {
        return 0;
    }
    setcc(cpu, cc);
    return 0;
}

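/*
 * PCILG (PCI Load): read up to 8 bytes from a BAR space (pcias 0-5) or
 * from PCI configuration space (pcias 15) into general register r1.
 */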
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    uint64_t offset;
    uint64_t data;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("pcilg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (pbdev->lgstg_blocked) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    }

    if (pcias < 6) {
        if ((8 - (offset & 0x7)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        MemoryRegion *mr = pbdev->pdev->io_regions[pcias].memory;
        memory_region_dispatch_read(mr, offset, &data, len,
                                    MEMTXATTRS_UNSPECIFIED);
    } else if (pcias == 15) {
        if ((4 - (offset & 0x3)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        data = pci_host_config_read_common(
                   pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);

        switch (len) {
        case 1:
            break;
        case 2:
            data = bswap16(data);
            break;
        case 4:
            data = bswap32(data);
            break;
        case 8:
            data = bswap64(data);
            break;
        default:
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
    } else {
        DPRINTF("invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    env->regs[r1] = data;
    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

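/*
 * MSI-X helpers for PCISTG: trap_msix() tells whether a store targets the
 * function's MSI-X table; update_msix_table_msg_data() ORs the function id
 * (shifted by ZPCI_MSI_VEC_BITS) into the message data being written.
 */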
static void update_msix_table_msg_data(S390PCIBusDevice *pbdev, uint64_t offset,
                                       uint64_t *data, uint8_t len)
{
    uint32_t val;
    uint8_t *msg_data;

    if (offset % PCI_MSIX_ENTRY_SIZE != 8) {
        return;
    }

    if (len != 4) {
        DPRINTF("access msix table msg data but len is %d\n", len);
        return;
    }

    msg_data = (uint8_t *)data - offset % PCI_MSIX_ENTRY_SIZE +
               PCI_MSIX_ENTRY_VECTOR_CTRL;
    val = pci_get_long(msg_data) | (pbdev->fid << ZPCI_MSI_VEC_BITS);
    pci_set_long(msg_data, val);
    DPRINTF("update msix msg_data to 0x%" PRIx64 "\n", *data);
}

static int trap_msix(S390PCIBusDevice *pbdev, uint64_t offset, uint8_t pcias)
{
    if (pbdev->msix.available && pbdev->msix.table_bar == pcias &&
        offset >= pbdev->msix.table_offset &&
        offset <= pbdev->msix.table_offset +
                  (pbdev->msix.entries - 1) * PCI_MSIX_ENTRY_SIZE) {
        return 1;
    } else {
        return 0;
    }
}

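/*
 * PCISTG (PCI Store): write up to 8 bytes from general register r1 to a
 * BAR space (pcias 0-5) or to PCI configuration space (pcias 15); stores
 * hitting the MSI-X table are redirected to the MSI-X table MMIO region.
 */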
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    uint64_t offset, data;
    S390PCIBusDevice *pbdev;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("pcistg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (pbdev->lgstg_blocked) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    }

    data = env->regs[r1];
    if (pcias < 6) {
        if ((8 - (offset & 0x7)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        MemoryRegion *mr;
        if (trap_msix(pbdev, offset, pcias)) {
            offset = offset - pbdev->msix.table_offset;
            mr = &pbdev->pdev->msix_table_mmio;
            update_msix_table_msg_data(pbdev, offset, &data, len);
        } else {
            mr = pbdev->pdev->io_regions[pcias].memory;
        }

        memory_region_dispatch_write(mr, offset, data, len,
                                     MEMTXATTRS_UNSPECIFIED);
    } else if (pcias == 15) {
        if ((4 - (offset & 0x3)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        switch (len) {
        case 1:
            break;
        case 2:
            data = bswap16(data);
            break;
        case 4:
            data = bswap32(data);
            break;
        case 8:
            data = bswap64(data);
            break;
        default:
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }

        pci_host_config_write_common(pbdev->pdev, offset,
                                     pci_config_size(pbdev->pdev),
                                     data, len);
    } else {
        DPRINTF("pcistg invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

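/*
 * RPCIT (Refresh PCI Translations): walk the guest DMA range given in
 * r2/r2+1, retranslate each address through the function's IOMMU region
 * and notify the IOMMU about the refreshed mapping.
 */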
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    uint32_t fh;
    S390PCIBusDevice *pbdev;
    hwaddr start, end;
    IOMMUTLBEntry entry;
    MemoryRegion *mr;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        goto out;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        goto out;
    }

    fh = env->regs[r1] >> 32;
    start = env->regs[r2];
    end = start + env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);

    if (!pbdev) {
        DPRINTF("rpcit no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        goto out;
    }

    mr = pci_device_iommu_address_space(pbdev->pdev)->root;
    while (start < end) {
        entry = mr->iommu_ops->translate(mr, start, 0);

        if (!entry.translated_addr) {
            setcc(cpu, ZPCI_PCI_LS_ERR);
            goto out;
        }

        memory_region_notify_iommu(mr, entry);
        start += entry.addr_mask + 1;
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
out:
    return 0;
}

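/*
 * PCISTB (PCI Store Block): copy a block of 16, 32, 64 or 128 bytes from
 * guest memory to a BAR space in 8-byte chunks.
 */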
int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
                        uint8_t ar)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    MemoryRegion *mr;
    int i;
    uint32_t fh;
    uint8_t pcias;
    uint8_t len;
    uint8_t buffer[128];

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    pcias = (env->regs[r1] >> 16) & 0xf;
    len = env->regs[r1] & 0xff;

    if (pcias > 5) {
        DPRINTF("pcistb invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    switch (len) {
    case 16:
    case 32:
    case 64:
    case 128:
        break;
    default:
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (pbdev->lgstg_blocked) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
        return 0;
    }

    mr = pbdev->pdev->io_regions[pcias].memory;
    if (!memory_region_access_valid(mr, env->regs[r3], len, true)) {
        program_interrupt(env, PGM_ADDRESSING, 6);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
        return 0;
    }

    for (i = 0; i < len / 8; i++) {
        memory_region_dispatch_write(mr, env->regs[r3] + i * 8,
                                     ldq_p(buffer + i * 8), 8,
                                     MEMTXATTRS_UNSPECIFIED);
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

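/*
 * Register or deregister the adapter interrupt sources described by the
 * FIB (summary and indicator areas, ISC) with the FLIC, for MPCIFC.
 */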
static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    int ret;
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);

    ret = css_register_io_adapter(S390_PCIPT_ADAPTER,
                                  FIB_DATA_ISC(ldl_p(&fib.data)), true, false,
                                  &pbdev->routes.adapter.adapter_id);
    assert(ret == 0);

    fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
                        ldq_p(&fib.aisb), true);
    fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
                        ldq_p(&fib.aibv), true);

    pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb);
    pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data));
    pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv);
    pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data));
    pbdev->isc = FIB_DATA_ISC(ldl_p(&fib.data));
    pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data));
    pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data));

    DPRINTF("reg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
}

static int dereg_irqs(S390PCIBusDevice *pbdev)
{
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);

    fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
                        pbdev->routes.adapter.ind_addr, false);

    pbdev->routes.adapter.summary_addr = 0;
    pbdev->routes.adapter.summary_offset = 0;
    pbdev->routes.adapter.ind_addr = 0;
    pbdev->routes.adapter.ind_offset = 0;
    pbdev->isc = 0;
    pbdev->noi = 0;
    pbdev->sum = 0;

    DPRINTF("dereg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
}

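/*
 * Register or deregister the guest I/O address translation parameters
 * (PCI base/limit addresses and IOTA) taken from the FIB.
 */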
static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    uint64_t pba = ldq_p(&fib.pba);
    uint64_t pal = ldq_p(&fib.pal);
    uint64_t g_iota = ldq_p(&fib.iota);
    uint8_t dt = (g_iota >> 2) & 0x7;
    uint8_t t = (g_iota >> 11) & 0x1;

    if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) {
        program_interrupt(env, PGM_OPERAND, 6);
        return -EINVAL;
    }

    /* currently we only support designation type 1 with translation */
    if (!(dt == ZPCI_IOTA_RTTO && t)) {
        error_report("unsupported ioat dt %d t %d", dt, t);
        program_interrupt(env, PGM_OPERAND, 6);
        return -EINVAL;
    }

    pbdev->pba = pba;
    pbdev->pal = pal;
    pbdev->g_iota = g_iota;
    return 0;
}

static void dereg_ioat(S390PCIBusDevice *pbdev)
{
    pbdev->pba = 0;
    pbdev->pal = 0;
    pbdev->g_iota = 0;
}

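/*
 * MPCIFC (Modify PCI Function Controls): read the FIB from guest memory
 * and apply the requested operation (interrupt or IOAT registration,
 * error/block reset, measurement setup) to the function.
 */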
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
{
    CPUS390XState *env = &cpu->env;
    uint8_t oc;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    oc = env->regs[r1] & 0xff;
    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        DPRINTF("mpcifc no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        return 0;
    }

    switch (oc) {
    case ZPCI_MOD_FC_REG_INT:
        if (reg_irqs(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
        }
        break;
    case ZPCI_MOD_FC_DEREG_INT:
        dereg_irqs(pbdev);
        break;
    case ZPCI_MOD_FC_REG_IOAT:
        if (reg_ioat(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
        }
        break;
    case ZPCI_MOD_FC_DEREG_IOAT:
        dereg_ioat(pbdev);
        break;
    case ZPCI_MOD_FC_REREG_IOAT:
        dereg_ioat(pbdev);
        if (reg_ioat(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
        }
        break;
    case ZPCI_MOD_FC_RESET_ERROR:
        pbdev->error_state = false;
        pbdev->lgstg_blocked = false;
        break;
    case ZPCI_MOD_FC_RESET_BLOCK:
        pbdev->lgstg_blocked = false;
        break;
    case ZPCI_MOD_FC_SET_MEASURE:
        pbdev->fmb_addr = ldq_p(&fib.fmb_addr);
        break;
    default:
        program_interrupt(&cpu->env, PGM_OPERAND, 6);
        cc = ZPCI_PCI_LS_ERR;
    }

    setcc(cpu, cc);
    return 0;
}

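/*
 * STPCIFC (Store PCI Function Controls): build a FIB describing the
 * current state of the function and store it to guest memory.
 */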
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint32_t data;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    memset(&fib, 0, sizeof(fib));
    stq_p(&fib.pba, pbdev->pba);
    stq_p(&fib.pal, pbdev->pal);
    stq_p(&fib.iota, pbdev->g_iota);
    stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
    stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
    stq_p(&fib.fmb_addr, pbdev->fmb_addr);

    data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
           ((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
           ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
    stl_p(&fib.data, data);

    if (pbdev->fh >> ENABLE_BIT_OFFSET) {
        fib.fc |= 0x80;
    }

    if (pbdev->error_state) {
        fib.fc |= 0x40;
    }

    if (pbdev->lgstg_blocked) {
        fib.fc |= 0x20;
    }

    if (pbdev->g_iota) {
        fib.fc |= 0x10;
    }

    if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        return 0;
    }

    setcc(cpu, cc);
    return 0;
}