/*
 * s390 PCI instructions
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "s390-pci-inst.h"
#include "s390-pci-bus.h"
#include <exec/memory-internal.h>
#include <qemu/error-report.h>

/* #define DEBUG_S390PCI_INST */
#ifdef DEBUG_S390PCI_INST
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

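/*
 * Store an 8-bit status code into bits 24-31 of the designated general
 * register, as reported to the guest after a zPCI operation.
 */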
static void s390_set_status_code(CPUS390XState *env,
                                 uint8_t r, uint64_t status_code)
{
    env->regs[r] &= ~0xff000000ULL;
    env->regs[r] |= (status_code & 0xff) << 24;
}

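/*
 * Handle the CLP List PCI command: validate the request block and fill the
 * response with one ClpFhListEntry per emulated PCI function, resuming from
 * an earlier call when a resume token is supplied.
 */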
static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
{
    S390PCIBusDevice *pbdev;
    uint32_t res_code, initial_l2, g_l2, finish;
    int rc, idx;
    uint64_t resume_token;

    rc = 0;
    if (lduw_p(&rrb->request.hdr.len) != 32) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
        res_code = CLP_RC_FMT;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
        ldq_p(&rrb->request.reserved1) != 0 ||
        ldq_p(&rrb->request.reserved2) != 0) {
        res_code = CLP_RC_RESNOT0;
        rc = -EINVAL;
        goto out;
    }

    resume_token = ldq_p(&rrb->request.resume_token);

    if (resume_token) {
        pbdev = s390_pci_find_dev_by_idx(resume_token);
        if (!pbdev) {
            res_code = CLP_RC_LISTPCI_BADRT;
            rc = -EINVAL;
            goto out;
        }
    }

    if (lduw_p(&rrb->response.hdr.len) < 48) {
        res_code = CLP_RC_8K;
        rc = -EINVAL;
        goto out;
    }

    initial_l2 = lduw_p(&rrb->response.hdr.len);
    if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry)
        != 0) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        *cc = 3;
        goto out;
    }

    stl_p(&rrb->response.fmt, 0);
    stq_p(&rrb->response.reserved1, 0);
    stq_p(&rrb->response.reserved2, 0);
    stl_p(&rrb->response.mdd, FH_VIRT);
    stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
    rrb->response.entry_size = sizeof(ClpFhListEntry);
    finish = 0;
    idx = resume_token;
    g_l2 = LIST_PCI_HDR_LEN;
    do {
        pbdev = s390_pci_find_dev_by_idx(idx);
        if (!pbdev) {
            finish = 1;
            break;
        }
        stw_p(&rrb->response.fh_list[idx - resume_token].device_id,
            pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
        stw_p(&rrb->response.fh_list[idx - resume_token].vendor_id,
            pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
        stl_p(&rrb->response.fh_list[idx - resume_token].config,
            pbdev->configured << 31);
        stl_p(&rrb->response.fh_list[idx - resume_token].fid, pbdev->fid);
        stl_p(&rrb->response.fh_list[idx - resume_token].fh, pbdev->fh);

        g_l2 += sizeof(ClpFhListEntry);
        /* Add endian check for DPRINTF? */
        DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n",
            g_l2,
            lduw_p(&rrb->response.fh_list[idx - resume_token].vendor_id),
            lduw_p(&rrb->response.fh_list[idx - resume_token].device_id),
            ldl_p(&rrb->response.fh_list[idx - resume_token].fid),
            ldl_p(&rrb->response.fh_list[idx - resume_token].fh));
        idx++;
    } while (g_l2 < initial_l2);

    if (finish == 1) {
        resume_token = 0;
    } else {
        resume_token = idx;
    }
    stq_p(&rrb->response.resume_token, resume_token);
    stw_p(&rrb->response.hdr.len, g_l2);
    stw_p(&rrb->response.hdr.rsp, CLP_RC_OK);
out:
    if (rc) {
        DPRINTF("list pci failed rc 0x%x\n", rc);
        stw_p(&rrb->response.hdr.rsp, res_code);
    }
    return rc;
}

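/*
 * CLP (Call Logical Processor) instruction handler. The request/response
 * block is read from guest memory, dispatched on the command code (list,
 * set function, query function, query function group) and written back.
 */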
int clp_service_call(S390CPU *cpu, uint8_t r2)
{
    ClpReqHdr *reqh;
    ClpRspHdr *resh;
    S390PCIBusDevice *pbdev;
    uint32_t req_len;
    uint32_t res_len;
    uint8_t buffer[4096 * 2];
    uint8_t cc = 0;
    CPUS390XState *env = &cpu->env;
    int i;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) {
        return 0;
    }
    reqh = (ClpReqHdr *)buffer;
    req_len = lduw_p(&reqh->len);
    if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + sizeof(*resh))) {
        return 0;
    }
    resh = (ClpRspHdr *)(buffer + req_len);
    res_len = lduw_p(&resh->len);
    if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }
    if ((req_len + res_len) > 8192) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + res_len)) {
        return 0;
    }

    if (req_len != 32) {
        stw_p(&resh->rsp, CLP_RC_LEN);
        goto out;
    }

    switch (lduw_p(&reqh->cmd)) {
    case CLP_LIST_PCI: {
        ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
        list_pci(rrb, &cc);
        break;
    }
    case CLP_SET_PCI_FN: {
        ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
        ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqsetpci->fh));
        if (!pbdev) {
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        switch (reqsetpci->oc) {
        case CLP_SET_ENABLE_PCI_FN:
            pbdev->fh = pbdev->fh | FH_ENABLED;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        case CLP_SET_DISABLE_PCI_FN:
            pbdev->fh = pbdev->fh & ~FH_ENABLED;
            pbdev->error_state = false;
            pbdev->lgstg_blocked = false;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        default:
            DPRINTF("unknown set pci command\n");
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
            break;
        }
        break;
    }
    case CLP_QUERY_PCI_FN: {
        ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
        ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqquery->fh));
        if (!pbdev) {
            DPRINTF("query pci no pci dev\n");
            stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        for (i = 0; i < PCI_BAR_COUNT; i++) {
            uint32_t data = pci_get_long(pbdev->pdev->config +
                PCI_BASE_ADDRESS_0 + (i * 4));

            stl_p(&resquery->bar[i], data);
            resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
                                    ctz64(pbdev->pdev->io_regions[i].size) : 0;
            DPRINTF("bar %d addr 0x%x size 0x%" PRIx64 "barsize 0x%x\n", i,
                    ldl_p(&resquery->bar[i]),
                    pbdev->pdev->io_regions[i].size,
                    resquery->bar_size[i]);
        }

        stq_p(&resquery->sdma, ZPCI_SDMA_ADDR);
        stq_p(&resquery->edma, ZPCI_EDMA_ADDR);
        stw_p(&resquery->pchid, 0);
        stw_p(&resquery->ug, 1);
        stl_p(&resquery->uid, pbdev->fid);
        stw_p(&resquery->hdr.rsp, CLP_RC_OK);
        break;
    }
    case CLP_QUERY_PCI_FNGRP: {
        ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;
        resgrp->fr = 1;
        stq_p(&resgrp->dasm, 0);
        stq_p(&resgrp->msia, ZPCI_MSI_ADDR);
        stw_p(&resgrp->mui, 0);
        stw_p(&resgrp->i, 128);
        resgrp->version = 0;

        stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
        break;
    }
    default:
        DPRINTF("unknown clp command\n");
        stw_p(&resh->rsp, CLP_RC_CMD);
        break;
    }

out:
    if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer,
                                req_len + res_len)) {
        return 0;
    }
    setcc(cpu, cc);
    return 0;
}

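/*
 * PCILG (PCI Load) handler: read up to 8 bytes from a BAR space (pcias 0-5)
 * or from the configuration space (pcias 15) of the function designated by
 * the handle in r2, and return the value in r1.
 */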
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    uint64_t offset;
    uint64_t data;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev || !(pbdev->fh & FH_ENABLED)) {
        DPRINTF("pcilg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (pbdev->lgstg_blocked) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    }

    if (pcias < 6) {
        if ((8 - (offset & 0x7)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        MemoryRegion *mr = pbdev->pdev->io_regions[pcias].memory;
        memory_region_dispatch_read(mr, offset, &data, len,
                                    MEMTXATTRS_UNSPECIFIED);
    } else if (pcias == 15) {
        if ((4 - (offset & 0x3)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        data = pci_host_config_read_common(
                   pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);

        switch (len) {
        case 1:
            break;
        case 2:
            data = bswap16(data);
            break;
        case 4:
            data = bswap32(data);
            break;
        case 8:
            data = bswap64(data);
            break;
        default:
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
    } else {
        DPRINTF("invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    env->regs[r1] = data;
    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

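/*
 * Trapped store to the message-data word of an MSI-X table entry: tag the
 * buffered value with the function's fid (shifted by ZPCI_MSI_VEC_BITS) so
 * the vector can later be associated with this device.
 */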
static void update_msix_table_msg_data(S390PCIBusDevice *pbdev, uint64_t offset,
                                       uint64_t *data, uint8_t len)
{
    uint32_t val;
    uint8_t *msg_data;

    if (offset % PCI_MSIX_ENTRY_SIZE != 8) {
        return;
    }

    if (len != 4) {
        DPRINTF("access msix table msg data but len is %d\n", len);
        return;
    }

    msg_data = (uint8_t *)data - offset % PCI_MSIX_ENTRY_SIZE +
               PCI_MSIX_ENTRY_VECTOR_CTRL;
    val = pci_get_long(msg_data) | (pbdev->fid << ZPCI_MSI_VEC_BITS);
    pci_set_long(msg_data, val);
    DPRINTF("update msix msg_data to 0x%" PRIx64 "\n", *data);
}

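/*
 * Return 1 if the access falls within the MSI-X table of the function,
 * i.e. the BAR matches table_bar and the offset lies inside the table.
 */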
static int trap_msix(S390PCIBusDevice *pbdev, uint64_t offset, uint8_t pcias)
{
    if (pbdev->msix.available && pbdev->msix.table_bar == pcias &&
        offset >= pbdev->msix.table_offset &&
        offset <= pbdev->msix.table_offset +
                  (pbdev->msix.entries - 1) * PCI_MSIX_ENTRY_SIZE) {
        return 1;
    } else {
        return 0;
    }
}

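/*
 * PCISTG (PCI Store) handler: write up to 8 bytes from r1 to a BAR space
 * (pcias 0-5) or to the configuration space (pcias 15) of the function
 * designated by the handle in r2. Stores hitting the MSI-X table are
 * redirected to the msix_table_mmio region.
 */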
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    uint64_t offset, data;
    S390PCIBusDevice *pbdev;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev || !(pbdev->fh & FH_ENABLED)) {
        DPRINTF("pcistg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (pbdev->lgstg_blocked) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    }

    data = env->regs[r1];
    if (pcias < 6) {
        if ((8 - (offset & 0x7)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        MemoryRegion *mr;
        if (trap_msix(pbdev, offset, pcias)) {
            offset = offset - pbdev->msix.table_offset;
            mr = &pbdev->pdev->msix_table_mmio;
            update_msix_table_msg_data(pbdev, offset, &data, len);
        } else {
            mr = pbdev->pdev->io_regions[pcias].memory;
        }

        memory_region_dispatch_write(mr, offset, data, len,
                                     MEMTXATTRS_UNSPECIFIED);
    } else if (pcias == 15) {
        if ((4 - (offset & 0x3)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        switch (len) {
        case 1:
            break;
        case 2:
            data = bswap16(data);
            break;
        case 4:
            data = bswap32(data);
            break;
        case 8:
            data = bswap64(data);
            break;
        default:
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }

        pci_host_config_write_common(pbdev->pdev, offset,
                                     pci_config_size(pbdev->pdev),
                                     data, len);
    } else {
        DPRINTF("pcistg invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

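/*
 * RPCIT (Refresh PCI Translations) handler: walk the guest DMA range given
 * by r2/r2+1, re-translate each page through the function's IOMMU region
 * and notify the IOMMU notifiers with the resulting mappings.
 */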
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    uint32_t fh;
    S390PCIBusDevice *pbdev;
    hwaddr start, end;
    IOMMUTLBEntry entry;
    MemoryRegion *mr;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        goto out;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        goto out;
    }

    fh = env->regs[r1] >> 32;
    start = env->regs[r2];
    end = start + env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev || !(pbdev->fh & FH_ENABLED)) {
        DPRINTF("rpcit no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        goto out;
    }

    mr = &pbdev->iommu_mr;
    while (start < end) {
        entry = mr->iommu_ops->translate(mr, start, 0);

        if (!entry.translated_addr) {
            setcc(cpu, ZPCI_PCI_LS_ERR);
            goto out;
        }

        memory_region_notify_iommu(mr, entry);
        start += entry.addr_mask + 1;
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
out:
    return 0;
}

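/*
 * PCISTB (PCI Store Block) handler: copy a 16/32/64/128-byte block from
 * guest memory at gaddr to the BAR space selected by r1, in 8-byte chunks
 * starting at the offset given in r3.
 */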
int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
                        uint8_t ar)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    MemoryRegion *mr;
    int i;
    uint32_t fh;
    uint8_t pcias;
    uint8_t len;
    uint8_t buffer[128];

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    pcias = (env->regs[r1] >> 16) & 0xf;
    len = env->regs[r1] & 0xff;

    if (pcias > 5) {
        DPRINTF("pcistb invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    switch (len) {
    case 16:
    case 32:
    case 64:
    case 128:
        break;
    default:
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev || !(pbdev->fh & FH_ENABLED)) {
        DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (pbdev->lgstg_blocked) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
        return 0;
    }

    mr = pbdev->pdev->io_regions[pcias].memory;
    if (!memory_region_access_valid(mr, env->regs[r3], len, true)) {
        program_interrupt(env, PGM_ADDRESSING, 6);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
        return 0;
    }

    for (i = 0; i < len / 8; i++) {
        memory_region_dispatch_write(mr, env->regs[r3] + i * 8,
                                     ldq_p(buffer + i * 8), 8,
                                     MEMTXATTRS_UNSPECIFIED);
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

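/*
 * Register the adapter interrupts described by the FIB: set up an I/O
 * adapter for the requested ISC and map the summary (AISB) and interrupt
 * (AIBV) indicator areas for adapter-interruption forwarding.
 */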
static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    int ret;
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);

    ret = css_register_io_adapter(S390_PCIPT_ADAPTER,
                                  FIB_DATA_ISC(ldl_p(&fib.data)), true, false,
                                  &pbdev->routes.adapter.adapter_id);
    assert(ret == 0);

    fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
                        ldq_p(&fib.aisb), true);
    fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
                        ldq_p(&fib.aibv), true);

    pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb);
    pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data));
    pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv);
    pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data));
    pbdev->isc = FIB_DATA_ISC(ldl_p(&fib.data));
    pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data));
    pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data));

    DPRINTF("reg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
}

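/*
 * Undo reg_irqs(): unmap the interrupt indicator area and clear the cached
 * adapter routing information for the function.
 */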
static int dereg_irqs(S390PCIBusDevice *pbdev)
{
    S390FLICState *fs = s390_get_flic();
    S390FLICStateClass *fsc = S390_FLIC_COMMON_GET_CLASS(fs);

    fsc->io_adapter_map(fs, pbdev->routes.adapter.adapter_id,
                        pbdev->routes.adapter.ind_addr, false);

    pbdev->routes.adapter.summary_addr = 0;
    pbdev->routes.adapter.summary_offset = 0;
    pbdev->routes.adapter.ind_addr = 0;
    pbdev->routes.adapter.ind_offset = 0;
    pbdev->isc = 0;
    pbdev->noi = 0;
    pbdev->sum = 0;

    DPRINTF("dereg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
}

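/*
 * Register the guest I/O address translation parameters described by the
 * FIB. Only designation type ZPCI_IOTA_RTTO with translation enabled is
 * accepted; anything else raises an operand exception.
 */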
static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    uint64_t pba = ldq_p(&fib.pba);
    uint64_t pal = ldq_p(&fib.pal);
    uint64_t g_iota = ldq_p(&fib.iota);
    uint8_t dt = (g_iota >> 2) & 0x7;
    uint8_t t = (g_iota >> 11) & 0x1;

    if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) {
        program_interrupt(env, PGM_OPERAND, 6);
        return -EINVAL;
    }

    /* currently we only support designation type 1 with translation */
    if (!(dt == ZPCI_IOTA_RTTO && t)) {
        error_report("unsupported ioat dt %d t %d", dt, t);
        program_interrupt(env, PGM_OPERAND, 6);
        return -EINVAL;
    }

    pbdev->pba = pba;
    pbdev->pal = pal;
    pbdev->g_iota = g_iota;

    s390_pcihost_iommu_configure(pbdev, true);

    return 0;
}

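/*
 * Drop the registered I/O address translation parameters and disable the
 * IOMMU region for the function.
 */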
static void dereg_ioat(S390PCIBusDevice *pbdev)
{
    pbdev->pba = 0;
    pbdev->pal = 0;
    pbdev->g_iota = 0;

    s390_pcihost_iommu_configure(pbdev, false);
}

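/*
 * MPCIFC (Modify PCI Function Controls) handler: read the FIB from guest
 * memory and dispatch on the operation control to register or deregister
 * interrupts and I/O address translation, reset error/blocked state, or
 * set the function measurement block address.
 */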
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
{
    CPUS390XState *env = &cpu->env;
    uint8_t oc;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    oc = env->regs[r1] & 0xff;
    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev || !(pbdev->fh & FH_ENABLED)) {
        DPRINTF("mpcifc no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        return 0;
    }

    switch (oc) {
    case ZPCI_MOD_FC_REG_INT:
        if (reg_irqs(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
        }
        break;
    case ZPCI_MOD_FC_DEREG_INT:
        dereg_irqs(pbdev);
        break;
    case ZPCI_MOD_FC_REG_IOAT:
        if (reg_ioat(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
        }
        break;
    case ZPCI_MOD_FC_DEREG_IOAT:
        dereg_ioat(pbdev);
        break;
    case ZPCI_MOD_FC_REREG_IOAT:
        dereg_ioat(pbdev);
        if (reg_ioat(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
        }
        break;
    case ZPCI_MOD_FC_RESET_ERROR:
        pbdev->error_state = false;
        pbdev->lgstg_blocked = false;
        break;
    case ZPCI_MOD_FC_RESET_BLOCK:
        pbdev->lgstg_blocked = false;
        break;
    case ZPCI_MOD_FC_SET_MEASURE:
        pbdev->fmb_addr = ldq_p(&fib.fmb_addr);
        break;
    default:
        program_interrupt(&cpu->env, PGM_OPERAND, 6);
        cc = ZPCI_PCI_LS_ERR;
    }

    setcc(cpu, cc);
    return 0;
}

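/*
 * STPCIFC (Store PCI Function Controls) handler: build a FIB reflecting the
 * function's current state (DMA ranges, interrupt routing, enabled/error/
 * blocked flags) and store it to guest memory at fiba.
 */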
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint32_t data;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    memset(&fib, 0, sizeof(fib));
    stq_p(&fib.pba, pbdev->pba);
    stq_p(&fib.pal, pbdev->pal);
    stq_p(&fib.iota, pbdev->g_iota);
    stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
    stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
    stq_p(&fib.fmb_addr, pbdev->fmb_addr);

    data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
           ((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
           ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
    stl_p(&fib.data, data);

    if (pbdev->fh & FH_ENABLED) {
        fib.fc |= 0x80;
    }

    if (pbdev->error_state) {
        fib.fc |= 0x40;
    }

    if (pbdev->lgstg_blocked) {
        fib.fc |= 0x20;
    }

    if (pbdev->g_iota) {
        fib.fc |= 0x10;
    }

    if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        return 0;
    }

    setcc(cpu, cc);
    return 0;
}