/*
 * s390 PCI instructions
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "s390-pci-inst.h"
#include "s390-pci-bus.h"
#include <exec/memory-internal.h>
#include <qemu/error-report.h>

/* #define DEBUG_S390PCI_INST */
#ifdef DEBUG_S390PCI_INST
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

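/*
 * Store an 8-bit status code into the status byte (bits 24..31 of the
 * register value) of general register r; used together with a non-zero
 * condition code to report the reason for a failed operation.
 */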
static void s390_set_status_code(CPUS390XState *env,
                                 uint8_t r, uint64_t status_code)
{
    env->regs[r] &= ~0xff000000ULL;
    env->regs[r] |= (status_code & 0xff) << 24;
}

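/*
 * CLP List PCI: validate the request/response headers, then fill the
 * response with one ClpFhListEntry per known zPCI function, starting at
 * the guest-supplied resume token. May set *cc on certain errors and
 * always leaves a response code in the response header.
 */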
static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
{
    S390PCIBusDevice *pbdev;
    uint32_t res_code, initial_l2, g_l2, finish;
    int rc, idx;
    uint64_t resume_token;

    rc = 0;
    if (lduw_p(&rrb->request.hdr.len) != 32) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
        res_code = CLP_RC_FMT;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
        ldq_p(&rrb->request.reserved1) != 0 ||
        ldq_p(&rrb->request.reserved2) != 0) {
        res_code = CLP_RC_RESNOT0;
        rc = -EINVAL;
        goto out;
    }

    resume_token = ldq_p(&rrb->request.resume_token);

    if (resume_token) {
        pbdev = s390_pci_find_dev_by_idx(resume_token);
        if (!pbdev) {
            res_code = CLP_RC_LISTPCI_BADRT;
            rc = -EINVAL;
            goto out;
        }
    }

    if (lduw_p(&rrb->response.hdr.len) < 48) {
        res_code = CLP_RC_8K;
        rc = -EINVAL;
        goto out;
    }

    initial_l2 = lduw_p(&rrb->response.hdr.len);
    if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry)
        != 0) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        *cc = 3;
        goto out;
    }

    stl_p(&rrb->response.fmt, 0);
    stq_p(&rrb->response.reserved1, 0);
    stq_p(&rrb->response.reserved2, 0);
    stl_p(&rrb->response.mdd, FH_VIRT);
    stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
    rrb->response.entry_size = sizeof(ClpFhListEntry);
    finish = 0;
    idx = resume_token;
    g_l2 = LIST_PCI_HDR_LEN;
    do {
        pbdev = s390_pci_find_dev_by_idx(idx);
        if (!pbdev) {
            finish = 1;
            break;
        }
        stw_p(&rrb->response.fh_list[idx - resume_token].device_id,
              pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
        stw_p(&rrb->response.fh_list[idx - resume_token].vendor_id,
              pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
        stl_p(&rrb->response.fh_list[idx - resume_token].config,
              pbdev->configured << 31);
        stl_p(&rrb->response.fh_list[idx - resume_token].fid, pbdev->fid);
        stl_p(&rrb->response.fh_list[idx - resume_token].fh, pbdev->fh);

        g_l2 += sizeof(ClpFhListEntry);
        /* Add endian check for DPRINTF? */
        DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n",
                g_l2,
                lduw_p(&rrb->response.fh_list[idx - resume_token].vendor_id),
                lduw_p(&rrb->response.fh_list[idx - resume_token].device_id),
                ldl_p(&rrb->response.fh_list[idx - resume_token].fid),
                ldl_p(&rrb->response.fh_list[idx - resume_token].fh));
        idx++;
    } while (g_l2 < initial_l2);

    if (finish == 1) {
        resume_token = 0;
    } else {
        resume_token = idx;
    }
    stq_p(&rrb->response.resume_token, resume_token);
    stw_p(&rrb->response.hdr.len, g_l2);
    stw_p(&rrb->response.hdr.rsp, CLP_RC_OK);
out:
    if (rc) {
        DPRINTF("list pci failed rc 0x%x\n", rc);
        stw_p(&rrb->response.hdr.rsp, res_code);
    }
    return rc;
}

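/*
 * CLP instruction handler: copy the request/response block addressed by
 * r2 from guest memory, dispatch on the CLP command (list PCI, set PCI
 * function, query PCI function, query PCI function group), write the
 * block back and set the condition code.
 */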
int clp_service_call(S390CPU *cpu, uint8_t r2)
{
    ClpReqHdr *reqh;
    ClpRspHdr *resh;
    S390PCIBusDevice *pbdev;
    uint32_t req_len;
    uint32_t res_len;
    uint8_t buffer[4096 * 2];
    uint8_t cc = 0;
    CPUS390XState *env = &cpu->env;
    int i;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) {
        return 0;
    }
    reqh = (ClpReqHdr *)buffer;
    req_len = lduw_p(&reqh->len);
    if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + sizeof(*resh))) {
        return 0;
    }
    resh = (ClpRspHdr *)(buffer + req_len);
    res_len = lduw_p(&resh->len);
    if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }
    if ((req_len + res_len) > 8192) {
        program_interrupt(env, PGM_OPERAND, 4);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + res_len)) {
        return 0;
    }

    if (req_len != 32) {
        stw_p(&resh->rsp, CLP_RC_LEN);
        goto out;
    }

    switch (lduw_p(&reqh->cmd)) {
    case CLP_LIST_PCI: {
        ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
        list_pci(rrb, &cc);
        break;
    }
    case CLP_SET_PCI_FN: {
        ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
        ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqsetpci->fh));
        if (!pbdev) {
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        switch (reqsetpci->oc) {
        case CLP_SET_ENABLE_PCI_FN:
            pbdev->fh = pbdev->fh | FH_ENABLED;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        case CLP_SET_DISABLE_PCI_FN:
            pbdev->fh = pbdev->fh & ~FH_ENABLED;
            pbdev->error_state = false;
            pbdev->lgstg_blocked = false;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        default:
            DPRINTF("unknown set pci command\n");
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
            break;
        }
        break;
    }
    case CLP_QUERY_PCI_FN: {
        ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
        ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(ldl_p(&reqquery->fh));
        if (!pbdev) {
            DPRINTF("query pci no pci dev\n");
            stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        for (i = 0; i < PCI_BAR_COUNT; i++) {
            uint32_t data = pci_get_long(pbdev->pdev->config +
                PCI_BASE_ADDRESS_0 + (i * 4));

            stl_p(&resquery->bar[i], data);
            resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
                                    ctz64(pbdev->pdev->io_regions[i].size) : 0;
            DPRINTF("bar %d addr 0x%x size 0x%" PRIx64 " barsize 0x%x\n", i,
                    ldl_p(&resquery->bar[i]),
                    pbdev->pdev->io_regions[i].size,
                    resquery->bar_size[i]);
        }

        stq_p(&resquery->sdma, ZPCI_SDMA_ADDR);
        stq_p(&resquery->edma, ZPCI_EDMA_ADDR);
        stw_p(&resquery->pchid, 0);
        stw_p(&resquery->ug, 1);
        stl_p(&resquery->uid, pbdev->fid);
        stw_p(&resquery->hdr.rsp, CLP_RC_OK);
        break;
    }
    case CLP_QUERY_PCI_FNGRP: {
        ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;
        resgrp->fr = 1;
        stq_p(&resgrp->dasm, 0);
        stq_p(&resgrp->msia, ZPCI_MSI_ADDR);
        stw_p(&resgrp->mui, 0);
        stw_p(&resgrp->i, 128);
        resgrp->version = 0;

        stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
        break;
    }
    default:
        DPRINTF("unknown clp command\n");
        stw_p(&resh->rsp, CLP_RC_CMD);
        break;
    }

out:
    if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer,
                                req_len + res_len)) {
        return 0;
    }
    setcc(cpu, cc);
    return 0;
}

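/*
 * PCILG (PCI Load): read up to 8 bytes from a BAR space (pcias 0..5) or
 * from the configuration space (pcias 15) of the zPCI function whose
 * handle is in r2, and place the result in r1.
 */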
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    uint64_t offset;
    uint64_t data;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev || !(pbdev->fh & FH_ENABLED)) {
        DPRINTF("pcilg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (pbdev->lgstg_blocked) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    }

    if (pcias < 6) {
        if ((8 - (offset & 0x7)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        MemoryRegion *mr = pbdev->pdev->io_regions[pcias].memory;
        memory_region_dispatch_read(mr, offset, &data, len,
                                    MEMTXATTRS_UNSPECIFIED);
    } else if (pcias == 15) {
        if ((4 - (offset & 0x3)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        data = pci_host_config_read_common(
                   pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);

        switch (len) {
        case 1:
            break;
        case 2:
            data = bswap16(data);
            break;
        case 4:
            data = bswap32(data);
            break;
        case 8:
            data = bswap64(data);
            break;
        default:
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
    } else {
        DPRINTF("invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    env->regs[r1] = data;
    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

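/*
 * A guest write to the message-data field of an MSI-X table entry is
 * patched here: the zPCI function id is or'ed into the data (shifted by
 * ZPCI_MSI_VEC_BITS) so the resulting MSI can later be attributed to
 * this function.
 */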
static void update_msix_table_msg_data(S390PCIBusDevice *pbdev, uint64_t offset,
                                       uint64_t *data, uint8_t len)
{
    uint32_t val;
    uint8_t *msg_data;

    if (offset % PCI_MSIX_ENTRY_SIZE != 8) {
        return;
    }

    if (len != 4) {
        DPRINTF("access msix table msg data but len is %d\n", len);
        return;
    }

    msg_data = (uint8_t *)data - offset % PCI_MSIX_ENTRY_SIZE +
               PCI_MSIX_ENTRY_VECTOR_CTRL;
    val = pci_get_long(msg_data) | (pbdev->fid << ZPCI_MSI_VEC_BITS);
    pci_set_long(msg_data, val);
    DPRINTF("update msix msg_data to 0x%" PRIx64 "\n", *data);
}

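/*
 * Return 1 if a store at 'offset' into BAR space 'pcias' falls inside
 * the MSI-X table of this function, 0 otherwise.
 */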
static int trap_msix(S390PCIBusDevice *pbdev, uint64_t offset, uint8_t pcias)
{
    if (pbdev->msix.available && pbdev->msix.table_bar == pcias &&
        offset >= pbdev->msix.table_offset &&
        offset <= pbdev->msix.table_offset +
                  (pbdev->msix.entries - 1) * PCI_MSIX_ENTRY_SIZE) {
        return 1;
    } else {
        return 0;
    }
}

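/*
 * PCISTG (PCI Store): write up to 8 bytes from r1 to a BAR space or to
 * the configuration space of the zPCI function addressed by the handle
 * in r2. Stores that hit the MSI-X table are redirected to the MSI-X
 * table MMIO region with the message data patched.
 */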
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    uint64_t offset, data;
    S390PCIBusDevice *pbdev;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        return 0;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev || !(pbdev->fh & FH_ENABLED)) {
        DPRINTF("pcistg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (pbdev->lgstg_blocked) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    }

    data = env->regs[r1];
    if (pcias < 6) {
        if ((8 - (offset & 0x7)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        MemoryRegion *mr;
        if (trap_msix(pbdev, offset, pcias)) {
            offset = offset - pbdev->msix.table_offset;
            mr = &pbdev->pdev->msix_table_mmio;
            update_msix_table_msg_data(pbdev, offset, &data, len);
        } else {
            mr = pbdev->pdev->io_regions[pcias].memory;
        }

        memory_region_dispatch_write(mr, offset, data, len,
                                     MEMTXATTRS_UNSPECIFIED);
    } else if (pcias == 15) {
        if ((4 - (offset & 0x3)) < len) {
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }
        switch (len) {
        case 1:
            break;
        case 2:
            data = bswap16(data);
            break;
        case 4:
            data = bswap32(data);
            break;
        case 8:
            data = bswap64(data);
            break;
        default:
            program_interrupt(env, PGM_OPERAND, 4);
            return 0;
        }

        pci_host_config_write_common(pbdev->pdev, offset,
                                     pci_config_size(pbdev->pdev),
                                     data, len);
    } else {
        DPRINTF("pcistg invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

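/*
 * RPCIT (Refresh PCI Translations): re-translate the DMA range given in
 * r2/r2+1 through the guest's I/O translation tables and notify the
 * IOMMU region so the mappings are updated.
 */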
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2)
{
    CPUS390XState *env = &cpu->env;
    uint32_t fh;
    S390PCIBusDevice *pbdev;
    hwaddr start, end;
    IOMMUTLBEntry entry;
    MemoryRegion *mr;

    cpu_synchronize_state(CPU(cpu));

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 4);
        goto out;
    }

    if (r2 & 0x1) {
        program_interrupt(env, PGM_SPECIFICATION, 4);
        goto out;
    }

    fh = env->regs[r1] >> 32;
    start = env->regs[r2];
    end = start + env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev || !(pbdev->fh & FH_ENABLED)) {
        DPRINTF("rpcit no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        goto out;
    }

    mr = &pbdev->iommu_mr;
    while (start < end) {
        entry = mr->iommu_ops->translate(mr, start, 0);

        if (!entry.translated_addr) {
            setcc(cpu, ZPCI_PCI_LS_ERR);
            goto out;
        }

        memory_region_notify_iommu(mr, entry);
        start += entry.addr_mask + 1;
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
out:
    return 0;
}

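/*
 * PCISTB (PCI Store Block): copy a block of 16, 32, 64 or 128 bytes
 * from guest memory at gaddr to the BAR space selected in r1, writing
 * it to the device in 8-byte pieces.
 */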
int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
                        uint8_t ar)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    MemoryRegion *mr;
    int i;
    uint32_t fh;
    uint8_t pcias;
    uint8_t len;
    uint8_t buffer[128];

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    pcias = (env->regs[r1] >> 16) & 0xf;
    len = env->regs[r1] & 0xff;

    if (pcias > 5) {
        DPRINTF("pcistb invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    switch (len) {
    case 16:
    case 32:
    case 64:
    case 128:
        break;
    default:
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev || !(pbdev->fh & FH_ENABLED)) {
        DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (pbdev->lgstg_blocked) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
        return 0;
    }

    mr = pbdev->pdev->io_regions[pcias].memory;
    if (!memory_region_access_valid(mr, env->regs[r3], len, true)) {
        program_interrupt(env, PGM_ADDRESSING, 6);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
        return 0;
    }

    for (i = 0; i < len / 8; i++) {
        memory_region_dispatch_write(mr, env->regs[r3] + i * 8,
                                     ldq_p(buffer + i * 8), 8,
                                     MEMTXATTRS_UNSPECIFIED);
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

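/*
 * Register adapter interrupts for a function: set up the summary and
 * notification indicators described by the FIB and remember the adapter
 * route so MSIs can be delivered as adapter interrupts.
 */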
static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    int ret, len;

    ret = css_register_io_adapter(S390_PCIPT_ADAPTER,
                                  FIB_DATA_ISC(ldl_p(&fib.data)), true, false,
                                  &pbdev->routes.adapter.adapter_id);
    assert(ret == 0);

    pbdev->summary_ind = get_indicator(ldq_p(&fib.aisb), sizeof(uint64_t));
    len = BITS_TO_LONGS(FIB_DATA_NOI(ldl_p(&fib.data))) * sizeof(unsigned long);
    pbdev->indicator = get_indicator(ldq_p(&fib.aibv), len);

    ret = map_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    if (ret) {
        goto out;
    }

    ret = map_indicator(&pbdev->routes.adapter, pbdev->indicator);
    if (ret) {
        goto out;
    }

    pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb);
    pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data));
    pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv);
    pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data));
    pbdev->isc = FIB_DATA_ISC(ldl_p(&fib.data));
    pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data));
    pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data));

    DPRINTF("reg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
out:
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);
    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;
    return ret;
}

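/* Deregister adapter interrupts: drop the indicators and clear the route. */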
int pci_dereg_irqs(S390PCIBusDevice *pbdev)
{
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);

    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;
    pbdev->routes.adapter.summary_addr = 0;
    pbdev->routes.adapter.summary_offset = 0;
    pbdev->routes.adapter.ind_addr = 0;
    pbdev->routes.adapter.ind_offset = 0;
    pbdev->isc = 0;
    pbdev->noi = 0;
    pbdev->sum = 0;

    DPRINTF("dereg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
}

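/*
 * Register I/O address translation: take the PCI base/limit addresses
 * and the guest IOTA from the FIB and enable the per-device IOMMU. Only
 * the designation handled here (ZPCI_IOTA_RTTO with translation) is
 * accepted.
 */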
static int reg_ioat(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    uint64_t pba = ldq_p(&fib.pba);
    uint64_t pal = ldq_p(&fib.pal);
    uint64_t g_iota = ldq_p(&fib.iota);
    uint8_t dt = (g_iota >> 2) & 0x7;
    uint8_t t = (g_iota >> 11) & 0x1;

    if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) {
        program_interrupt(env, PGM_OPERAND, 6);
        return -EINVAL;
    }

    /* currently we only support designation type 1 with translation */
    if (!(dt == ZPCI_IOTA_RTTO && t)) {
        error_report("unsupported ioat dt %d t %d", dt, t);
        program_interrupt(env, PGM_OPERAND, 6);
        return -EINVAL;
    }

    pbdev->pba = pba;
    pbdev->pal = pal;
    pbdev->g_iota = g_iota;

    s390_pci_iommu_enable(pbdev);

    return 0;
}

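/* Deregister I/O address translation: disable the IOMMU and clear state. */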
void pci_dereg_ioat(S390PCIBusDevice *pbdev)
{
    s390_pci_iommu_disable(pbdev);
    pbdev->pba = 0;
    pbdev->pal = 0;
    pbdev->g_iota = 0;
}

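/*
 * MPCIFC (Modify PCI Function Controls): read a FIB from guest memory
 * and perform the requested operation control: register/deregister
 * interrupts, register/deregister/re-register I/O address translation,
 * reset the error or blocking state, or set the measurement block
 * address.
 */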
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
{
    CPUS390XState *env = &cpu->env;
    uint8_t oc, dmaas;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    oc = env->regs[r1] & 0xff;
    dmaas = (env->regs[r1] >> 16) & 0xff;
    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev || !(pbdev->fh & FH_ENABLED)) {
        DPRINTF("mpcifc no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        return 0;
    }

    if (fib.fmt != 0) {
        program_interrupt(env, PGM_OPERAND, 6);
        return 0;
    }

    switch (oc) {
    case ZPCI_MOD_FC_REG_INT:
        if (pbdev->summary_ind) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_irqs(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_RES_NOT_AVAIL);
        }
        break;
    case ZPCI_MOD_FC_DEREG_INT:
        if (!pbdev->summary_ind) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_irqs(pbdev);
        }
        break;
    case ZPCI_MOD_FC_REG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (pbdev->iommu_enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_ioat(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
        }
        break;
    case ZPCI_MOD_FC_DEREG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu_enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev);
        }
        break;
    case ZPCI_MOD_FC_REREG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu_enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev);
            if (reg_ioat(env, pbdev, fib)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
            }
        }
        break;
    case ZPCI_MOD_FC_RESET_ERROR:
        pbdev->error_state = false;
        pbdev->lgstg_blocked = false;
        break;
    case ZPCI_MOD_FC_RESET_BLOCK:
        pbdev->lgstg_blocked = false;
        break;
    case ZPCI_MOD_FC_SET_MEASURE:
        pbdev->fmb_addr = ldq_p(&fib.fmb_addr);
        break;
    default:
        program_interrupt(&cpu->env, PGM_OPERAND, 6);
        cc = ZPCI_PCI_LS_ERR;
    }

    setcc(cpu, cc);
    return 0;
}

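/*
 * STPCIFC (Store PCI Function Controls): build a FIB describing the
 * current state of the function (DMA range, IOTA, indicators, status
 * flags) and store it to the guest address in fiba.
 */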
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint32_t data;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, 6);
        return 0;
    }

    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        program_interrupt(env, PGM_SPECIFICATION, 6);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(fh);
    if (!pbdev) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    memset(&fib, 0, sizeof(fib));
    stq_p(&fib.pba, pbdev->pba);
    stq_p(&fib.pal, pbdev->pal);
    stq_p(&fib.iota, pbdev->g_iota);
    stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
    stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
    stq_p(&fib.fmb_addr, pbdev->fmb_addr);

    data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
           ((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
           ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
    stl_p(&fib.data, data);

    if (pbdev->fh & FH_ENABLED) {
        fib.fc |= 0x80;
    }

    if (pbdev->error_state) {
        fib.fc |= 0x40;
    }

    if (pbdev->lgstg_blocked) {
        fib.fc |= 0x20;
    }

    if (pbdev->g_iota) {
        fib.fc |= 0x10;
    }

    if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        return 0;
    }

    setcc(cpu, cc);
    return 0;
}