/*
 * s390 PCI instructions
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */
14 #include "qemu/osdep.h"
15 #include "qemu-common.h"
17 #include "s390-pci-inst.h"
18 #include "s390-pci-bus.h"
19 #include "exec/memory-internal.h"
20 #include "qemu/error-report.h"
21 #include "sysemu/hw_accel.h"
#ifndef DEBUG_S390PCI_INST
#define DEBUG_S390PCI_INST 0
#endif

/* Debug tracing helper: compiled out unless DEBUG_S390PCI_INST is set.
 * The do/while (0) wrapper makes the macro safe as a single statement. */
#define DPRINTF(fmt, ...)                                          \
    do {                                                           \
        if (DEBUG_S390PCI_INST) {                                  \
            fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__); \
        }                                                          \
    } while (0)
34 static void s390_set_status_code(CPUS390XState
*env
,
35 uint8_t r
, uint64_t status_code
)
37 env
->regs
[r
] &= ~0xff000000ULL
;
38 env
->regs
[r
] |= (status_code
& 0xff) << 24;
41 static int list_pci(ClpReqRspListPci
*rrb
, uint8_t *cc
)
43 S390PCIBusDevice
*pbdev
= NULL
;
44 S390pciState
*s
= s390_get_phb();
45 uint32_t res_code
, initial_l2
, g_l2
;
47 uint64_t resume_token
;
50 if (lduw_p(&rrb
->request
.hdr
.len
) != 32) {
51 res_code
= CLP_RC_LEN
;
56 if ((ldl_p(&rrb
->request
.fmt
) & CLP_MASK_FMT
) != 0) {
57 res_code
= CLP_RC_FMT
;
62 if ((ldl_p(&rrb
->request
.fmt
) & ~CLP_MASK_FMT
) != 0 ||
63 ldq_p(&rrb
->request
.reserved1
) != 0) {
64 res_code
= CLP_RC_RESNOT0
;
69 resume_token
= ldq_p(&rrb
->request
.resume_token
);
72 pbdev
= s390_pci_find_dev_by_idx(s
, resume_token
);
74 res_code
= CLP_RC_LISTPCI_BADRT
;
79 pbdev
= s390_pci_find_next_avail_dev(s
, NULL
);
82 if (lduw_p(&rrb
->response
.hdr
.len
) < 48) {
88 initial_l2
= lduw_p(&rrb
->response
.hdr
.len
);
89 if ((initial_l2
- LIST_PCI_HDR_LEN
) % sizeof(ClpFhListEntry
)
91 res_code
= CLP_RC_LEN
;
97 stl_p(&rrb
->response
.fmt
, 0);
98 stq_p(&rrb
->response
.reserved1
, 0);
99 stl_p(&rrb
->response
.mdd
, FH_MASK_SHM
);
100 stw_p(&rrb
->response
.max_fn
, PCI_MAX_FUNCTIONS
);
101 rrb
->response
.flags
= UID_CHECKING_ENABLED
;
102 rrb
->response
.entry_size
= sizeof(ClpFhListEntry
);
105 g_l2
= LIST_PCI_HDR_LEN
;
106 while (g_l2
< initial_l2
&& pbdev
) {
107 stw_p(&rrb
->response
.fh_list
[i
].device_id
,
108 pci_get_word(pbdev
->pdev
->config
+ PCI_DEVICE_ID
));
109 stw_p(&rrb
->response
.fh_list
[i
].vendor_id
,
110 pci_get_word(pbdev
->pdev
->config
+ PCI_VENDOR_ID
));
111 /* Ignore RESERVED devices. */
112 stl_p(&rrb
->response
.fh_list
[i
].config
,
113 pbdev
->state
== ZPCI_FS_STANDBY
? 0 : 1 << 31);
114 stl_p(&rrb
->response
.fh_list
[i
].fid
, pbdev
->fid
);
115 stl_p(&rrb
->response
.fh_list
[i
].fh
, pbdev
->fh
);
117 g_l2
+= sizeof(ClpFhListEntry
);
118 /* Add endian check for DPRINTF? */
119 DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n",
121 lduw_p(&rrb
->response
.fh_list
[i
].vendor_id
),
122 lduw_p(&rrb
->response
.fh_list
[i
].device_id
),
123 ldl_p(&rrb
->response
.fh_list
[i
].fid
),
124 ldl_p(&rrb
->response
.fh_list
[i
].fh
));
125 pbdev
= s390_pci_find_next_avail_dev(s
, pbdev
);
132 resume_token
= pbdev
->fh
& FH_MASK_INDEX
;
134 stq_p(&rrb
->response
.resume_token
, resume_token
);
135 stw_p(&rrb
->response
.hdr
.len
, g_l2
);
136 stw_p(&rrb
->response
.hdr
.rsp
, CLP_RC_OK
);
139 DPRINTF("list pci failed rc 0x%x\n", rc
);
140 stw_p(&rrb
->response
.hdr
.rsp
, res_code
);
145 int clp_service_call(S390CPU
*cpu
, uint8_t r2
, uintptr_t ra
)
149 S390PCIBusDevice
*pbdev
;
152 uint8_t buffer
[4096 * 2];
154 CPUS390XState
*env
= &cpu
->env
;
155 S390pciState
*s
= s390_get_phb();
158 cpu_synchronize_state(CPU(cpu
));
160 if (env
->psw
.mask
& PSW_MASK_PSTATE
) {
161 s390_program_interrupt(env
, PGM_PRIVILEGED
, 4, ra
);
165 if (s390_cpu_virt_mem_read(cpu
, env
->regs
[r2
], r2
, buffer
, sizeof(*reqh
))) {
166 s390_cpu_virt_mem_handle_exc(cpu
, ra
);
169 reqh
= (ClpReqHdr
*)buffer
;
170 req_len
= lduw_p(&reqh
->len
);
171 if (req_len
< 16 || req_len
> 8184 || (req_len
% 8 != 0)) {
172 s390_program_interrupt(env
, PGM_OPERAND
, 4, ra
);
176 if (s390_cpu_virt_mem_read(cpu
, env
->regs
[r2
], r2
, buffer
,
177 req_len
+ sizeof(*resh
))) {
178 s390_cpu_virt_mem_handle_exc(cpu
, ra
);
181 resh
= (ClpRspHdr
*)(buffer
+ req_len
);
182 res_len
= lduw_p(&resh
->len
);
183 if (res_len
< 8 || res_len
> 8176 || (res_len
% 8 != 0)) {
184 s390_program_interrupt(env
, PGM_OPERAND
, 4, ra
);
187 if ((req_len
+ res_len
) > 8192) {
188 s390_program_interrupt(env
, PGM_OPERAND
, 4, ra
);
192 if (s390_cpu_virt_mem_read(cpu
, env
->regs
[r2
], r2
, buffer
,
193 req_len
+ res_len
)) {
194 s390_cpu_virt_mem_handle_exc(cpu
, ra
);
199 stw_p(&resh
->rsp
, CLP_RC_LEN
);
203 switch (lduw_p(&reqh
->cmd
)) {
205 ClpReqRspListPci
*rrb
= (ClpReqRspListPci
*)buffer
;
209 case CLP_SET_PCI_FN
: {
210 ClpReqSetPci
*reqsetpci
= (ClpReqSetPci
*)reqh
;
211 ClpRspSetPci
*ressetpci
= (ClpRspSetPci
*)resh
;
213 pbdev
= s390_pci_find_dev_by_fh(s
, ldl_p(&reqsetpci
->fh
));
215 stw_p(&ressetpci
->hdr
.rsp
, CLP_RC_SETPCIFN_FH
);
219 switch (reqsetpci
->oc
) {
220 case CLP_SET_ENABLE_PCI_FN
:
221 switch (reqsetpci
->ndas
) {
223 stw_p(&ressetpci
->hdr
.rsp
, CLP_RC_SETPCIFN_DMAAS
);
228 stw_p(&ressetpci
->hdr
.rsp
, CLP_RC_SETPCIFN_RES
);
232 if (pbdev
->fh
& FH_MASK_ENABLE
) {
233 stw_p(&ressetpci
->hdr
.rsp
, CLP_RC_SETPCIFN_FHOP
);
237 pbdev
->fh
|= FH_MASK_ENABLE
;
238 pbdev
->state
= ZPCI_FS_ENABLED
;
239 stl_p(&ressetpci
->fh
, pbdev
->fh
);
240 stw_p(&ressetpci
->hdr
.rsp
, CLP_RC_OK
);
242 case CLP_SET_DISABLE_PCI_FN
:
243 if (!(pbdev
->fh
& FH_MASK_ENABLE
)) {
244 stw_p(&ressetpci
->hdr
.rsp
, CLP_RC_SETPCIFN_FHOP
);
247 device_reset(DEVICE(pbdev
));
248 pbdev
->fh
&= ~FH_MASK_ENABLE
;
249 pbdev
->state
= ZPCI_FS_DISABLED
;
250 stl_p(&ressetpci
->fh
, pbdev
->fh
);
251 stw_p(&ressetpci
->hdr
.rsp
, CLP_RC_OK
);
254 DPRINTF("unknown set pci command\n");
255 stw_p(&ressetpci
->hdr
.rsp
, CLP_RC_SETPCIFN_FHOP
);
260 case CLP_QUERY_PCI_FN
: {
261 ClpReqQueryPci
*reqquery
= (ClpReqQueryPci
*)reqh
;
262 ClpRspQueryPci
*resquery
= (ClpRspQueryPci
*)resh
;
264 pbdev
= s390_pci_find_dev_by_fh(s
, ldl_p(&reqquery
->fh
));
266 DPRINTF("query pci no pci dev\n");
267 stw_p(&resquery
->hdr
.rsp
, CLP_RC_SETPCIFN_FH
);
271 for (i
= 0; i
< PCI_BAR_COUNT
; i
++) {
272 uint32_t data
= pci_get_long(pbdev
->pdev
->config
+
273 PCI_BASE_ADDRESS_0
+ (i
* 4));
275 stl_p(&resquery
->bar
[i
], data
);
276 resquery
->bar_size
[i
] = pbdev
->pdev
->io_regions
[i
].size
?
277 ctz64(pbdev
->pdev
->io_regions
[i
].size
) : 0;
278 DPRINTF("bar %d addr 0x%x size 0x%" PRIx64
"barsize 0x%x\n", i
,
279 ldl_p(&resquery
->bar
[i
]),
280 pbdev
->pdev
->io_regions
[i
].size
,
281 resquery
->bar_size
[i
]);
284 stq_p(&resquery
->sdma
, ZPCI_SDMA_ADDR
);
285 stq_p(&resquery
->edma
, ZPCI_EDMA_ADDR
);
286 stl_p(&resquery
->fid
, pbdev
->fid
);
287 stw_p(&resquery
->pchid
, 0);
288 stw_p(&resquery
->ug
, 1);
289 stl_p(&resquery
->uid
, pbdev
->uid
);
290 stw_p(&resquery
->hdr
.rsp
, CLP_RC_OK
);
293 case CLP_QUERY_PCI_FNGRP
: {
294 ClpRspQueryPciGrp
*resgrp
= (ClpRspQueryPciGrp
*)resh
;
296 stq_p(&resgrp
->dasm
, 0);
297 stq_p(&resgrp
->msia
, ZPCI_MSI_ADDR
);
298 stw_p(&resgrp
->mui
, 0);
299 stw_p(&resgrp
->i
, 128);
300 stw_p(&resgrp
->maxstbl
, 128);
303 stw_p(&resgrp
->hdr
.rsp
, CLP_RC_OK
);
307 DPRINTF("unknown clp command\n");
308 stw_p(&resh
->rsp
, CLP_RC_CMD
);
313 if (s390_cpu_virt_mem_write(cpu
, env
->regs
[r2
], r2
, buffer
,
314 req_len
+ res_len
)) {
315 s390_cpu_virt_mem_handle_exc(cpu
, ra
);
/**
 * Swap data contained in s390x big endian registers to little endian
 * PCI bars
 *
 * @ptr: a pointer to a uint64_t data field
 * @len: the length of the valid data, must be 1,2,4 or 8
 */
static int zpci_endian_swap(uint64_t *ptr, uint8_t len)
{
    uint64_t data = *ptr;

    switch (len) {
    case 1:
        /* single byte: nothing to swap */
        break;
    case 2:
        data = bswap16(data);
        break;
    case 4:
        data = bswap32(data);
        break;
    case 8:
        data = bswap64(data);
        break;
    default:
        return -EINVAL;
    }
    *ptr = data;
    return 0;
}
352 static MemoryRegion
*s390_get_subregion(MemoryRegion
*mr
, uint64_t offset
,
355 MemoryRegion
*subregion
;
356 uint64_t subregion_size
;
358 QTAILQ_FOREACH(subregion
, &mr
->subregions
, subregions_link
) {
359 subregion_size
= int128_get64(subregion
->size
);
360 if ((offset
>= subregion
->addr
) &&
361 (offset
+ len
) <= (subregion
->addr
+ subregion_size
)) {
369 static MemTxResult
zpci_read_bar(S390PCIBusDevice
*pbdev
, uint8_t pcias
,
370 uint64_t offset
, uint64_t *data
, uint8_t len
)
374 mr
= pbdev
->pdev
->io_regions
[pcias
].memory
;
375 mr
= s390_get_subregion(mr
, offset
, len
);
377 return memory_region_dispatch_read(mr
, offset
, data
, len
,
378 MEMTXATTRS_UNSPECIFIED
);
381 int pcilg_service_call(S390CPU
*cpu
, uint8_t r1
, uint8_t r2
, uintptr_t ra
)
383 CPUS390XState
*env
= &cpu
->env
;
384 S390PCIBusDevice
*pbdev
;
392 cpu_synchronize_state(CPU(cpu
));
394 if (env
->psw
.mask
& PSW_MASK_PSTATE
) {
395 s390_program_interrupt(env
, PGM_PRIVILEGED
, 4, ra
);
400 s390_program_interrupt(env
, PGM_SPECIFICATION
, 4, ra
);
404 fh
= env
->regs
[r2
] >> 32;
405 pcias
= (env
->regs
[r2
] >> 16) & 0xf;
406 len
= env
->regs
[r2
] & 0xf;
407 offset
= env
->regs
[r2
+ 1];
409 if (!(fh
& FH_MASK_ENABLE
)) {
410 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
414 pbdev
= s390_pci_find_dev_by_fh(s390_get_phb(), fh
);
416 DPRINTF("pcilg no pci dev\n");
417 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
421 switch (pbdev
->state
) {
422 case ZPCI_FS_PERMANENT_ERROR
:
424 setcc(cpu
, ZPCI_PCI_LS_ERR
);
425 s390_set_status_code(env
, r2
, ZPCI_PCI_ST_BLOCKED
);
432 case ZPCI_IO_BAR_MIN
...ZPCI_IO_BAR_MAX
:
433 if (!len
|| (len
> (8 - (offset
& 0x7)))) {
434 s390_program_interrupt(env
, PGM_OPERAND
, 4, ra
);
437 result
= zpci_read_bar(pbdev
, pcias
, offset
, &data
, len
);
438 if (result
!= MEMTX_OK
) {
439 s390_program_interrupt(env
, PGM_OPERAND
, 4, ra
);
443 case ZPCI_CONFIG_BAR
:
444 if (!len
|| (len
> (4 - (offset
& 0x3))) || len
== 3) {
445 s390_program_interrupt(env
, PGM_OPERAND
, 4, ra
);
448 data
= pci_host_config_read_common(
449 pbdev
->pdev
, offset
, pci_config_size(pbdev
->pdev
), len
);
451 if (zpci_endian_swap(&data
, len
)) {
452 s390_program_interrupt(env
, PGM_OPERAND
, 4, ra
);
457 DPRINTF("pcilg invalid space\n");
458 setcc(cpu
, ZPCI_PCI_LS_ERR
);
459 s390_set_status_code(env
, r2
, ZPCI_PCI_ST_INVAL_AS
);
463 env
->regs
[r1
] = data
;
464 setcc(cpu
, ZPCI_PCI_LS_OK
);
468 static MemTxResult
zpci_write_bar(S390PCIBusDevice
*pbdev
, uint8_t pcias
,
469 uint64_t offset
, uint64_t data
, uint8_t len
)
473 mr
= pbdev
->pdev
->io_regions
[pcias
].memory
;
474 mr
= s390_get_subregion(mr
, offset
, len
);
476 return memory_region_dispatch_write(mr
, offset
, data
, len
,
477 MEMTXATTRS_UNSPECIFIED
);
480 int pcistg_service_call(S390CPU
*cpu
, uint8_t r1
, uint8_t r2
, uintptr_t ra
)
482 CPUS390XState
*env
= &cpu
->env
;
483 uint64_t offset
, data
;
484 S390PCIBusDevice
*pbdev
;
490 cpu_synchronize_state(CPU(cpu
));
492 if (env
->psw
.mask
& PSW_MASK_PSTATE
) {
493 s390_program_interrupt(env
, PGM_PRIVILEGED
, 4, ra
);
498 s390_program_interrupt(env
, PGM_SPECIFICATION
, 4, ra
);
502 fh
= env
->regs
[r2
] >> 32;
503 pcias
= (env
->regs
[r2
] >> 16) & 0xf;
504 len
= env
->regs
[r2
] & 0xf;
505 offset
= env
->regs
[r2
+ 1];
506 data
= env
->regs
[r1
];
508 if (!(fh
& FH_MASK_ENABLE
)) {
509 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
513 pbdev
= s390_pci_find_dev_by_fh(s390_get_phb(), fh
);
515 DPRINTF("pcistg no pci dev\n");
516 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
520 switch (pbdev
->state
) {
521 /* ZPCI_FS_RESERVED, ZPCI_FS_STANDBY and ZPCI_FS_DISABLED
522 * are already covered by the FH_MASK_ENABLE check above
524 case ZPCI_FS_PERMANENT_ERROR
:
526 setcc(cpu
, ZPCI_PCI_LS_ERR
);
527 s390_set_status_code(env
, r2
, ZPCI_PCI_ST_BLOCKED
);
534 /* A ZPCI PCI card may use any BAR from BAR 0 to BAR 5 */
535 case ZPCI_IO_BAR_MIN
...ZPCI_IO_BAR_MAX
:
537 * A length of 0 is invalid and length should not cross a double word
539 if (!len
|| (len
> (8 - (offset
& 0x7)))) {
540 s390_program_interrupt(env
, PGM_OPERAND
, 4, ra
);
544 result
= zpci_write_bar(pbdev
, pcias
, offset
, data
, len
);
545 if (result
!= MEMTX_OK
) {
546 s390_program_interrupt(env
, PGM_OPERAND
, 4, ra
);
550 case ZPCI_CONFIG_BAR
:
551 /* ZPCI uses the pseudo BAR number 15 as configuration space */
552 /* possible access lengths are 1,2,4 and must not cross a word */
553 if (!len
|| (len
> (4 - (offset
& 0x3))) || len
== 3) {
554 s390_program_interrupt(env
, PGM_OPERAND
, 4, ra
);
557 /* len = 1,2,4 so we do not need to test */
558 zpci_endian_swap(&data
, len
);
559 pci_host_config_write_common(pbdev
->pdev
, offset
,
560 pci_config_size(pbdev
->pdev
),
564 DPRINTF("pcistg invalid space\n");
565 setcc(cpu
, ZPCI_PCI_LS_ERR
);
566 s390_set_status_code(env
, r2
, ZPCI_PCI_ST_INVAL_AS
);
570 setcc(cpu
, ZPCI_PCI_LS_OK
);
574 int rpcit_service_call(S390CPU
*cpu
, uint8_t r1
, uint8_t r2
, uintptr_t ra
)
576 CPUS390XState
*env
= &cpu
->env
;
578 S390PCIBusDevice
*pbdev
;
582 IOMMUMemoryRegion
*iommu_mr
;
583 IOMMUMemoryRegionClass
*imrc
;
585 cpu_synchronize_state(CPU(cpu
));
587 if (env
->psw
.mask
& PSW_MASK_PSTATE
) {
588 s390_program_interrupt(env
, PGM_PRIVILEGED
, 4, ra
);
593 s390_program_interrupt(env
, PGM_SPECIFICATION
, 4, ra
);
597 fh
= env
->regs
[r1
] >> 32;
598 start
= env
->regs
[r2
];
599 end
= start
+ env
->regs
[r2
+ 1];
601 pbdev
= s390_pci_find_dev_by_fh(s390_get_phb(), fh
);
603 DPRINTF("rpcit no pci dev\n");
604 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
608 switch (pbdev
->state
) {
609 case ZPCI_FS_RESERVED
:
610 case ZPCI_FS_STANDBY
:
611 case ZPCI_FS_DISABLED
:
612 case ZPCI_FS_PERMANENT_ERROR
:
613 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
616 setcc(cpu
, ZPCI_PCI_LS_ERR
);
617 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_ERROR_RECOVER
);
623 iommu
= pbdev
->iommu
;
624 if (!iommu
->g_iota
) {
625 pbdev
->state
= ZPCI_FS_ERROR
;
626 setcc(cpu
, ZPCI_PCI_LS_ERR
);
627 s390_set_status_code(env
, r1
, ZPCI_PCI_ST_INSUF_RES
);
628 s390_pci_generate_error_event(ERR_EVENT_INVALAS
, pbdev
->fh
, pbdev
->fid
,
633 if (end
< iommu
->pba
|| start
> iommu
->pal
) {
634 pbdev
->state
= ZPCI_FS_ERROR
;
635 setcc(cpu
, ZPCI_PCI_LS_ERR
);
636 s390_set_status_code(env
, r1
, ZPCI_PCI_ST_INSUF_RES
);
637 s390_pci_generate_error_event(ERR_EVENT_OORANGE
, pbdev
->fh
, pbdev
->fid
,
642 iommu_mr
= &iommu
->iommu_mr
;
643 imrc
= IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr
);
645 while (start
< end
) {
646 entry
= imrc
->translate(iommu_mr
, start
, IOMMU_NONE
);
648 if (!entry
.translated_addr
) {
649 pbdev
->state
= ZPCI_FS_ERROR
;
650 setcc(cpu
, ZPCI_PCI_LS_ERR
);
651 s390_set_status_code(env
, r1
, ZPCI_PCI_ST_INSUF_RES
);
652 s390_pci_generate_error_event(ERR_EVENT_SERR
, pbdev
->fh
, pbdev
->fid
,
653 start
, ERR_EVENT_Q_BIT
);
657 memory_region_notify_iommu(iommu_mr
, entry
);
658 start
+= entry
.addr_mask
+ 1;
661 setcc(cpu
, ZPCI_PCI_LS_OK
);
666 int pcistb_service_call(S390CPU
*cpu
, uint8_t r1
, uint8_t r3
, uint64_t gaddr
,
667 uint8_t ar
, uintptr_t ra
)
669 CPUS390XState
*env
= &cpu
->env
;
670 S390PCIBusDevice
*pbdev
;
680 if (env
->psw
.mask
& PSW_MASK_PSTATE
) {
681 s390_program_interrupt(env
, PGM_PRIVILEGED
, 6, ra
);
685 fh
= env
->regs
[r1
] >> 32;
686 pcias
= (env
->regs
[r1
] >> 16) & 0xf;
687 len
= env
->regs
[r1
] & 0xff;
688 offset
= env
->regs
[r3
];
690 if (!(fh
& FH_MASK_ENABLE
)) {
691 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
695 pbdev
= s390_pci_find_dev_by_fh(s390_get_phb(), fh
);
697 DPRINTF("pcistb no pci dev fh 0x%x\n", fh
);
698 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
702 switch (pbdev
->state
) {
703 case ZPCI_FS_PERMANENT_ERROR
:
705 setcc(cpu
, ZPCI_PCI_LS_ERR
);
706 s390_set_status_code(env
, r1
, ZPCI_PCI_ST_BLOCKED
);
712 if (pcias
> ZPCI_IO_BAR_MAX
) {
713 DPRINTF("pcistb invalid space\n");
714 setcc(cpu
, ZPCI_PCI_LS_ERR
);
715 s390_set_status_code(env
, r1
, ZPCI_PCI_ST_INVAL_AS
);
719 /* Verify the address, offset and length */
720 /* offset must be a multiple of 8 */
722 goto specification_error
;
724 /* Length must be greater than 8, a multiple of 8 */
725 /* and not greater than maxstbl */
726 if ((len
<= 8) || (len
% 8) || (len
> pbdev
->maxstbl
)) {
727 goto specification_error
;
729 /* Do not cross a 4K-byte boundary */
730 if (((offset
& 0xfff) + len
) > 0x1000) {
731 goto specification_error
;
733 /* Guest address must be double word aligned */
734 if (gaddr
& 0x07UL
) {
735 goto specification_error
;
738 mr
= pbdev
->pdev
->io_regions
[pcias
].memory
;
739 mr
= s390_get_subregion(mr
, offset
, len
);
742 if (!memory_region_access_valid(mr
, offset
, len
, true)) {
743 s390_program_interrupt(env
, PGM_OPERAND
, 6, ra
);
747 if (s390_cpu_virt_mem_read(cpu
, gaddr
, ar
, buffer
, len
)) {
748 s390_cpu_virt_mem_handle_exc(cpu
, ra
);
752 for (i
= 0; i
< len
/ 8; i
++) {
753 result
= memory_region_dispatch_write(mr
, offset
+ i
* 8,
754 ldq_p(buffer
+ i
* 8), 8,
755 MEMTXATTRS_UNSPECIFIED
);
756 if (result
!= MEMTX_OK
) {
757 s390_program_interrupt(env
, PGM_OPERAND
, 6, ra
);
762 setcc(cpu
, ZPCI_PCI_LS_OK
);
766 s390_program_interrupt(env
, PGM_SPECIFICATION
, 6, ra
);
770 static int reg_irqs(CPUS390XState
*env
, S390PCIBusDevice
*pbdev
, ZpciFib fib
)
773 uint8_t isc
= FIB_DATA_ISC(ldl_p(&fib
.data
));
775 pbdev
->routes
.adapter
.adapter_id
= css_get_adapter_id(
776 CSS_IO_ADAPTER_PCI
, isc
);
777 pbdev
->summary_ind
= get_indicator(ldq_p(&fib
.aisb
), sizeof(uint64_t));
778 len
= BITS_TO_LONGS(FIB_DATA_NOI(ldl_p(&fib
.data
))) * sizeof(unsigned long);
779 pbdev
->indicator
= get_indicator(ldq_p(&fib
.aibv
), len
);
781 ret
= map_indicator(&pbdev
->routes
.adapter
, pbdev
->summary_ind
);
786 ret
= map_indicator(&pbdev
->routes
.adapter
, pbdev
->indicator
);
791 pbdev
->routes
.adapter
.summary_addr
= ldq_p(&fib
.aisb
);
792 pbdev
->routes
.adapter
.summary_offset
= FIB_DATA_AISBO(ldl_p(&fib
.data
));
793 pbdev
->routes
.adapter
.ind_addr
= ldq_p(&fib
.aibv
);
794 pbdev
->routes
.adapter
.ind_offset
= FIB_DATA_AIBVO(ldl_p(&fib
.data
));
796 pbdev
->noi
= FIB_DATA_NOI(ldl_p(&fib
.data
));
797 pbdev
->sum
= FIB_DATA_SUM(ldl_p(&fib
.data
));
799 DPRINTF("reg_irqs adapter id %d\n", pbdev
->routes
.adapter
.adapter_id
);
802 release_indicator(&pbdev
->routes
.adapter
, pbdev
->summary_ind
);
803 release_indicator(&pbdev
->routes
.adapter
, pbdev
->indicator
);
804 pbdev
->summary_ind
= NULL
;
805 pbdev
->indicator
= NULL
;
809 int pci_dereg_irqs(S390PCIBusDevice
*pbdev
)
811 release_indicator(&pbdev
->routes
.adapter
, pbdev
->summary_ind
);
812 release_indicator(&pbdev
->routes
.adapter
, pbdev
->indicator
);
814 pbdev
->summary_ind
= NULL
;
815 pbdev
->indicator
= NULL
;
816 pbdev
->routes
.adapter
.summary_addr
= 0;
817 pbdev
->routes
.adapter
.summary_offset
= 0;
818 pbdev
->routes
.adapter
.ind_addr
= 0;
819 pbdev
->routes
.adapter
.ind_offset
= 0;
824 DPRINTF("dereg_irqs adapter id %d\n", pbdev
->routes
.adapter
.adapter_id
);
828 static int reg_ioat(CPUS390XState
*env
, S390PCIIOMMU
*iommu
, ZpciFib fib
,
831 uint64_t pba
= ldq_p(&fib
.pba
);
832 uint64_t pal
= ldq_p(&fib
.pal
);
833 uint64_t g_iota
= ldq_p(&fib
.iota
);
834 uint8_t dt
= (g_iota
>> 2) & 0x7;
835 uint8_t t
= (g_iota
>> 11) & 0x1;
837 if (pba
> pal
|| pba
< ZPCI_SDMA_ADDR
|| pal
> ZPCI_EDMA_ADDR
) {
838 s390_program_interrupt(env
, PGM_OPERAND
, 6, ra
);
842 /* currently we only support designation type 1 with translation */
843 if (!(dt
== ZPCI_IOTA_RTTO
&& t
)) {
844 error_report("unsupported ioat dt %d t %d", dt
, t
);
845 s390_program_interrupt(env
, PGM_OPERAND
, 6, ra
);
851 iommu
->g_iota
= g_iota
;
853 s390_pci_iommu_enable(iommu
);
858 void pci_dereg_ioat(S390PCIIOMMU
*iommu
)
860 s390_pci_iommu_disable(iommu
);
866 int mpcifc_service_call(S390CPU
*cpu
, uint8_t r1
, uint64_t fiba
, uint8_t ar
,
869 CPUS390XState
*env
= &cpu
->env
;
873 S390PCIBusDevice
*pbdev
;
874 uint64_t cc
= ZPCI_PCI_LS_OK
;
876 if (env
->psw
.mask
& PSW_MASK_PSTATE
) {
877 s390_program_interrupt(env
, PGM_PRIVILEGED
, 6, ra
);
881 oc
= env
->regs
[r1
] & 0xff;
882 dmaas
= (env
->regs
[r1
] >> 16) & 0xff;
883 fh
= env
->regs
[r1
] >> 32;
886 s390_program_interrupt(env
, PGM_SPECIFICATION
, 6, ra
);
890 pbdev
= s390_pci_find_dev_by_fh(s390_get_phb(), fh
);
892 DPRINTF("mpcifc no pci dev fh 0x%x\n", fh
);
893 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
897 switch (pbdev
->state
) {
898 case ZPCI_FS_RESERVED
:
899 case ZPCI_FS_STANDBY
:
900 case ZPCI_FS_DISABLED
:
901 case ZPCI_FS_PERMANENT_ERROR
:
902 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
908 if (s390_cpu_virt_mem_read(cpu
, fiba
, ar
, (uint8_t *)&fib
, sizeof(fib
))) {
909 s390_cpu_virt_mem_handle_exc(cpu
, ra
);
914 s390_program_interrupt(env
, PGM_OPERAND
, 6, ra
);
919 case ZPCI_MOD_FC_REG_INT
:
920 if (pbdev
->summary_ind
) {
921 cc
= ZPCI_PCI_LS_ERR
;
922 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_SEQUENCE
);
923 } else if (reg_irqs(env
, pbdev
, fib
)) {
924 cc
= ZPCI_PCI_LS_ERR
;
925 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_RES_NOT_AVAIL
);
928 case ZPCI_MOD_FC_DEREG_INT
:
929 if (!pbdev
->summary_ind
) {
930 cc
= ZPCI_PCI_LS_ERR
;
931 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_SEQUENCE
);
933 pci_dereg_irqs(pbdev
);
936 case ZPCI_MOD_FC_REG_IOAT
:
938 cc
= ZPCI_PCI_LS_ERR
;
939 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_DMAAS_INVAL
);
940 } else if (pbdev
->iommu
->enabled
) {
941 cc
= ZPCI_PCI_LS_ERR
;
942 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_SEQUENCE
);
943 } else if (reg_ioat(env
, pbdev
->iommu
, fib
, ra
)) {
944 cc
= ZPCI_PCI_LS_ERR
;
945 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_INSUF_RES
);
948 case ZPCI_MOD_FC_DEREG_IOAT
:
950 cc
= ZPCI_PCI_LS_ERR
;
951 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_DMAAS_INVAL
);
952 } else if (!pbdev
->iommu
->enabled
) {
953 cc
= ZPCI_PCI_LS_ERR
;
954 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_SEQUENCE
);
956 pci_dereg_ioat(pbdev
->iommu
);
959 case ZPCI_MOD_FC_REREG_IOAT
:
961 cc
= ZPCI_PCI_LS_ERR
;
962 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_DMAAS_INVAL
);
963 } else if (!pbdev
->iommu
->enabled
) {
964 cc
= ZPCI_PCI_LS_ERR
;
965 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_SEQUENCE
);
967 pci_dereg_ioat(pbdev
->iommu
);
968 if (reg_ioat(env
, pbdev
->iommu
, fib
, ra
)) {
969 cc
= ZPCI_PCI_LS_ERR
;
970 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_INSUF_RES
);
974 case ZPCI_MOD_FC_RESET_ERROR
:
975 switch (pbdev
->state
) {
976 case ZPCI_FS_BLOCKED
:
978 pbdev
->state
= ZPCI_FS_ENABLED
;
981 cc
= ZPCI_PCI_LS_ERR
;
982 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_SEQUENCE
);
985 case ZPCI_MOD_FC_RESET_BLOCK
:
986 switch (pbdev
->state
) {
988 pbdev
->state
= ZPCI_FS_BLOCKED
;
991 cc
= ZPCI_PCI_LS_ERR
;
992 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_SEQUENCE
);
995 case ZPCI_MOD_FC_SET_MEASURE
:
996 pbdev
->fmb_addr
= ldq_p(&fib
.fmb_addr
);
999 s390_program_interrupt(&cpu
->env
, PGM_OPERAND
, 6, ra
);
1000 cc
= ZPCI_PCI_LS_ERR
;
1007 int stpcifc_service_call(S390CPU
*cpu
, uint8_t r1
, uint64_t fiba
, uint8_t ar
,
1010 CPUS390XState
*env
= &cpu
->env
;
1014 S390PCIBusDevice
*pbdev
;
1016 uint64_t cc
= ZPCI_PCI_LS_OK
;
1018 if (env
->psw
.mask
& PSW_MASK_PSTATE
) {
1019 s390_program_interrupt(env
, PGM_PRIVILEGED
, 6, ra
);
1023 fh
= env
->regs
[r1
] >> 32;
1024 dmaas
= (env
->regs
[r1
] >> 16) & 0xff;
1027 setcc(cpu
, ZPCI_PCI_LS_ERR
);
1028 s390_set_status_code(env
, r1
, ZPCI_STPCIFC_ST_INVAL_DMAAS
);
1033 s390_program_interrupt(env
, PGM_SPECIFICATION
, 6, ra
);
1037 pbdev
= s390_pci_find_dev_by_idx(s390_get_phb(), fh
& FH_MASK_INDEX
);
1039 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
1043 memset(&fib
, 0, sizeof(fib
));
1045 switch (pbdev
->state
) {
1046 case ZPCI_FS_RESERVED
:
1047 case ZPCI_FS_STANDBY
:
1048 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
1050 case ZPCI_FS_DISABLED
:
1051 if (fh
& FH_MASK_ENABLE
) {
1052 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
1056 /* BLOCKED bit is set to one coincident with the setting of ERROR bit.
1057 * FH Enabled bit is set to one in states of ENABLED, BLOCKED or ERROR. */
1060 case ZPCI_FS_BLOCKED
:
1062 case ZPCI_FS_ENABLED
:
1064 if (pbdev
->iommu
->enabled
) {
1067 if (!(fh
& FH_MASK_ENABLE
)) {
1068 env
->regs
[r1
] |= 1ULL << 63;
1071 case ZPCI_FS_PERMANENT_ERROR
:
1072 setcc(cpu
, ZPCI_PCI_LS_ERR
);
1073 s390_set_status_code(env
, r1
, ZPCI_STPCIFC_ST_PERM_ERROR
);
1077 stq_p(&fib
.pba
, pbdev
->iommu
->pba
);
1078 stq_p(&fib
.pal
, pbdev
->iommu
->pal
);
1079 stq_p(&fib
.iota
, pbdev
->iommu
->g_iota
);
1080 stq_p(&fib
.aibv
, pbdev
->routes
.adapter
.ind_addr
);
1081 stq_p(&fib
.aisb
, pbdev
->routes
.adapter
.summary_addr
);
1082 stq_p(&fib
.fmb_addr
, pbdev
->fmb_addr
);
1084 data
= ((uint32_t)pbdev
->isc
<< 28) | ((uint32_t)pbdev
->noi
<< 16) |
1085 ((uint32_t)pbdev
->routes
.adapter
.ind_offset
<< 8) |
1086 ((uint32_t)pbdev
->sum
<< 7) | pbdev
->routes
.adapter
.summary_offset
;
1087 stl_p(&fib
.data
, data
);
1090 if (s390_cpu_virt_mem_write(cpu
, fiba
, ar
, (uint8_t *)&fib
, sizeof(fib
))) {
1091 s390_cpu_virt_mem_handle_exc(cpu
, ra
);