/*
 * s390 PCI instructions
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "s390-pci-inst.h"
#include "s390-pci-bus.h"
#include "exec/memory-internal.h"
#include "qemu/error-report.h"
#include "sysemu/hw_accel.h"

#ifndef DEBUG_S390PCI_INST
#define DEBUG_S390PCI_INST 0
#endif

#define DPRINTF(fmt, ...)                                          \
    do {                                                           \
        if (DEBUG_S390PCI_INST) {                                  \
            fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__); \
        }                                                          \
    } while (0)

static void s390_set_status_code(CPUS390XState *env,
                                 uint8_t r, uint64_t status_code)
{
    env->regs[r] &= ~0xff000000ULL;
    env->regs[r] |= (status_code & 0xff) << 24;
}

static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
{
    S390PCIBusDevice *pbdev = NULL;
    S390pciState *s = s390_get_phb();
    uint32_t res_code, initial_l2, g_l2;
    int rc, i;
    uint64_t resume_token;

    rc = 0;
    if (lduw_p(&rrb->request.hdr.len) != 32) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
        res_code = CLP_RC_FMT;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
        ldq_p(&rrb->request.reserved1) != 0) {
        res_code = CLP_RC_RESNOT0;
        rc = -EINVAL;
        goto out;
    }

    resume_token = ldq_p(&rrb->request.resume_token);

    if (resume_token) {
        pbdev = s390_pci_find_dev_by_idx(s, resume_token);
        if (!pbdev) {
            res_code = CLP_RC_LISTPCI_BADRT;
            rc = -EINVAL;
            goto out;
        }
    } else {
        pbdev = s390_pci_find_next_avail_dev(s, NULL);
    }

    if (lduw_p(&rrb->response.hdr.len) < 48) {
        res_code = CLP_RC_8K;
        rc = -EINVAL;
        goto out;
    }

    initial_l2 = lduw_p(&rrb->response.hdr.len);
    if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry)
        != 0) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        *cc = 3;
        goto out;
    }

    stl_p(&rrb->response.fmt, 0);
    stq_p(&rrb->response.reserved1, 0);
    stl_p(&rrb->response.mdd, FH_MASK_SHM);
    stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
    rrb->response.flags = UID_CHECKING_ENABLED;
    rrb->response.entry_size = sizeof(ClpFhListEntry);

    i = 0;
    g_l2 = LIST_PCI_HDR_LEN;
    while (g_l2 < initial_l2 && pbdev) {
        stw_p(&rrb->response.fh_list[i].device_id,
            pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
        stw_p(&rrb->response.fh_list[i].vendor_id,
            pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
        /* Ignore RESERVED devices. */
        stl_p(&rrb->response.fh_list[i].config,
            pbdev->state == ZPCI_FS_STANDBY ? 0 : 1 << 31);
        stl_p(&rrb->response.fh_list[i].fid, pbdev->fid);
        stl_p(&rrb->response.fh_list[i].fh, pbdev->fh);

        g_l2 += sizeof(ClpFhListEntry);
        /* Add endian check for DPRINTF? */
        DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n",
                g_l2,
                lduw_p(&rrb->response.fh_list[i].vendor_id),
                lduw_p(&rrb->response.fh_list[i].device_id),
                ldl_p(&rrb->response.fh_list[i].fid),
                ldl_p(&rrb->response.fh_list[i].fh));
        pbdev = s390_pci_find_next_avail_dev(s, pbdev);
        i++;
    }

    if (!pbdev) {
        resume_token = 0;
    } else {
        resume_token = pbdev->fh & FH_MASK_INDEX;
    }
    stq_p(&rrb->response.resume_token, resume_token);
    stw_p(&rrb->response.hdr.len, g_l2);
    stw_p(&rrb->response.hdr.rsp, CLP_RC_OK);
out:
    if (rc) {
        DPRINTF("list pci failed rc 0x%x\n", rc);
        stw_p(&rrb->response.hdr.rsp, res_code);
    }
    return rc;
}
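
/*
 * CLP (Call Logical Processor) service call: copy the guest's CLP
 * request/response block into a scratch buffer, dispatch on the CLP
 * command (list PCI, set PCI function, query function, query group)
 * and write the response block back to guest memory.
 */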

int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
{
    ClpReqHdr *reqh;
    ClpRspHdr *resh;
    S390PCIBusDevice *pbdev;
    uint32_t req_len;
    uint32_t res_len;
    uint8_t buffer[4096 * 2];
    uint8_t cc = 0;
    CPUS390XState *env = &cpu->env;
    S390pciState *s = s390_get_phb();
    int i;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    reqh = (ClpReqHdr *)buffer;
    req_len = lduw_p(&reqh->len);
    if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
        s390_program_interrupt(env, PGM_OPERAND, 4, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + sizeof(*resh))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    resh = (ClpRspHdr *)(buffer + req_len);
    res_len = lduw_p(&resh->len);
    if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
        s390_program_interrupt(env, PGM_OPERAND, 4, ra);
        return 0;
    }
    if ((req_len + res_len) > 8192) {
        s390_program_interrupt(env, PGM_OPERAND, 4, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + res_len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    if (req_len != 32) {
        stw_p(&resh->rsp, CLP_RC_LEN);
        goto out;
    }

    switch (lduw_p(&reqh->cmd)) {
    case CLP_LIST_PCI: {
        ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
        list_pci(rrb, &cc);
        break;
    }
    case CLP_SET_PCI_FN: {
        ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
        ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqsetpci->fh));
        if (!pbdev) {
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        switch (reqsetpci->oc) {
        case CLP_SET_ENABLE_PCI_FN:
            switch (reqsetpci->ndas) {
            case 0:
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_DMAAS);
                goto out;
            case 1:
                break;
            default:
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_RES);
                goto out;
            }

            if (pbdev->fh & FH_MASK_ENABLE) {
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
                goto out;
            }

            pbdev->fh |= FH_MASK_ENABLE;
            pbdev->state = ZPCI_FS_ENABLED;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        case CLP_SET_DISABLE_PCI_FN:
            if (!(pbdev->fh & FH_MASK_ENABLE)) {
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
                goto out;
            }
            device_reset(DEVICE(pbdev));
            pbdev->fh &= ~FH_MASK_ENABLE;
            pbdev->state = ZPCI_FS_DISABLED;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        default:
            DPRINTF("unknown set pci command\n");
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
            break;
        }
        break;
    }
    case CLP_QUERY_PCI_FN: {
        ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
        ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqquery->fh));
        if (!pbdev) {
            DPRINTF("query pci no pci dev\n");
            stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        for (i = 0; i < PCI_BAR_COUNT; i++) {
            uint32_t data = pci_get_long(pbdev->pdev->config +
                PCI_BASE_ADDRESS_0 + (i * 4));

            stl_p(&resquery->bar[i], data);
            resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
                                    ctz64(pbdev->pdev->io_regions[i].size) : 0;
            DPRINTF("bar %d addr 0x%x size 0x%" PRIx64 "barsize 0x%x\n", i,
                    ldl_p(&resquery->bar[i]),
                    pbdev->pdev->io_regions[i].size,
                    resquery->bar_size[i]);
        }

        stq_p(&resquery->sdma, ZPCI_SDMA_ADDR);
        stq_p(&resquery->edma, ZPCI_EDMA_ADDR);
        stl_p(&resquery->fid, pbdev->fid);
        stw_p(&resquery->pchid, 0);
        stw_p(&resquery->ug, 1);
        stl_p(&resquery->uid, pbdev->uid);
        stw_p(&resquery->hdr.rsp, CLP_RC_OK);
        break;
    }
    case CLP_QUERY_PCI_FNGRP: {
        ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;

        stq_p(&resgrp->dasm, 0);
        stq_p(&resgrp->msia, ZPCI_MSI_ADDR);
        stw_p(&resgrp->mui, 0);
        stw_p(&resgrp->i, 128);
        stw_p(&resgrp->maxstbl, 128);

        stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
        break;
    }
    default:
        DPRINTF("unknown clp command\n");
        stw_p(&resh->rsp, CLP_RC_CMD);
        break;
    }

out:
    if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer,
                                req_len + res_len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    setcc(cpu, cc);
    return 0;
}

/**
 * Swap data contained in s390x big endian registers to little endian
 * PCI bars.
 *
 * @ptr: a pointer to a uint64_t data field
 * @len: the length of the valid data, must be 1,2,4 or 8
 */
static int zpci_endian_swap(uint64_t *ptr, uint8_t len)
{
    uint64_t data = *ptr;

    switch (len) {
    case 1:
        break;
    case 2:
        data = bswap16(data);
        break;
    case 4:
        data = bswap32(data);
        break;
    case 8:
        data = bswap64(data);
        break;
    default:
        return -EINVAL;
    }
    *ptr = data;
    return 0;
}
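
/*
 * Find the subregion of a BAR's MemoryRegion that fully contains
 * [offset, offset + len); if no subregion matches, the region passed in
 * is returned unchanged.
 */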

static MemoryRegion *s390_get_subregion(MemoryRegion *mr, uint64_t offset,
                                        uint8_t len)
{
    MemoryRegion *subregion;
    uint64_t subregion_size;

    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        subregion_size = int128_get64(subregion->size);
        if ((offset >= subregion->addr) &&
            (offset + len) <= (subregion->addr + subregion_size)) {
            mr = subregion;
            break;
        }
    }
    return mr;
}

static MemTxResult zpci_read_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
                                 uint64_t offset, uint64_t *data, uint8_t len)
{
    MemoryRegion *mr;

    mr = pbdev->pdev->io_regions[pcias].memory;
    mr = s390_get_subregion(mr, offset, len);
    offset -= mr->addr;
    return memory_region_dispatch_read(mr, offset, data, len,
                                       MEMTXATTRS_UNSPECIFIED);
}
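
/*
 * PCILG (PCI Load): read up to 8 bytes from a PCI BAR, or from the
 * function's configuration space via the config pseudo BAR, and return
 * the value in r1; config-space accesses are byte-swapped with
 * zpci_endian_swap().
 */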

int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    MemTxResult result;
    uint64_t offset;
    uint64_t data;
    uint32_t fh;
    uint8_t pcias;
    uint8_t len;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
        return 0;
    }

    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("pcilg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    switch (pcias) {
    case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
        if (!len || (len > (8 - (offset & 0x7)))) {
            s390_program_interrupt(env, PGM_OPERAND, 4, ra);
            return 0;
        }
        result = zpci_read_bar(pbdev, pcias, offset, &data, len);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, 4, ra);
            return 0;
        }
        break;
    case ZPCI_CONFIG_BAR:
        if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
            s390_program_interrupt(env, PGM_OPERAND, 4, ra);
            return 0;
        }
        data = pci_host_config_read_common(
                   pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);

        if (zpci_endian_swap(&data, len)) {
            s390_program_interrupt(env, PGM_OPERAND, 4, ra);
            return 0;
        }
        break;
    default:
        DPRINTF("pcilg invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    env->regs[r1] = data;
    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

static MemTxResult zpci_write_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
                                  uint64_t offset, uint64_t data, uint8_t len)
{
    MemoryRegion *mr;

    mr = pbdev->pdev->io_regions[pcias].memory;
    mr = s390_get_subregion(mr, offset, len);
    offset -= mr->addr;
    return memory_region_dispatch_write(mr, offset, data, len,
                                        MEMTXATTRS_UNSPECIFIED);
}
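
/*
 * PCISTG (PCI Store): write up to 8 bytes from r1 to a PCI BAR, or to
 * the function's configuration space via the config pseudo BAR.
 */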

int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint64_t offset, data;
    S390PCIBusDevice *pbdev;
    MemTxResult result;
    uint32_t fh;
    uint8_t pcias;
    uint8_t len;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
        return 0;
    }

    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];
    data = env->regs[r1];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("pcistg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    /* ZPCI_FS_RESERVED, ZPCI_FS_STANDBY and ZPCI_FS_DISABLED
     * are already covered by the FH_MASK_ENABLE check above
     */
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    switch (pcias) {
    /* A ZPCI PCI card may use any BAR from BAR 0 to BAR 5 */
    case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
        /*
         * A length of 0 is invalid and length should not cross a double word
         */
        if (!len || (len > (8 - (offset & 0x7)))) {
            s390_program_interrupt(env, PGM_OPERAND, 4, ra);
            return 0;
        }

        result = zpci_write_bar(pbdev, pcias, offset, data, len);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, 4, ra);
            return 0;
        }
        break;
    case ZPCI_CONFIG_BAR:
        /* ZPCI uses the pseudo BAR number 15 as configuration space */
        /* possible access lengths are 1,2,4 and must not cross a word */
        if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
            s390_program_interrupt(env, PGM_OPERAND, 4, ra);
            return 0;
        }
        /* len = 1,2,4 so we do not need to test */
        zpci_endian_swap(&data, len);
        pci_host_config_write_common(pbdev->pdev, offset,
                                     pci_config_size(pbdev->pdev),
                                     data, len);
        break;
    default:
        DPRINTF("pcistg invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}
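
/*
 * Update the shadow IOTLB for a single guest DMA page and emit the
 * corresponding IOMMU notification: unmaps drop the cached entry,
 * changed mappings are invalidated first, unchanged ones are skipped.
 */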

static void s390_pci_update_iotlb(S390PCIIOMMU *iommu, S390IOTLBEntry *entry)
{
    S390IOTLBEntry *cache = g_hash_table_lookup(iommu->iotlb, &entry->iova);
    IOMMUTLBEntry notify = {
        .target_as = &address_space_memory,
        .iova = entry->iova,
        .translated_addr = entry->translated_addr,
        .perm = entry->perm,
        .addr_mask = ~PAGE_MASK,
    };

    if (entry->perm == IOMMU_NONE) {
        if (!cache) {
            return;
        }
        g_hash_table_remove(iommu->iotlb, &entry->iova);
    } else {
        if (cache) {
            if (cache->perm == entry->perm &&
                cache->translated_addr == entry->translated_addr) {
                return;
            }

            notify.perm = IOMMU_NONE;
            memory_region_notify_iommu(&iommu->iommu_mr, notify);
            notify.perm = entry->perm;
        }

        cache = g_new(S390IOTLBEntry, 1);
        cache->iova = entry->iova;
        cache->translated_addr = entry->translated_addr;
        cache->len = PAGE_SIZE;
        cache->perm = entry->perm;
        g_hash_table_replace(iommu->iotlb, &cache->iova, cache);
    }

    memory_region_notify_iommu(&iommu->iommu_mr, notify);
}
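
/*
 * RPCIT (Refresh PCI Translations): walk the guest I/O translation
 * tables for the requested IOVA range and resynchronize the IOMMU
 * mappings page by page.
 */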

int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint32_t fh;
    uint16_t error = 0;
    S390PCIBusDevice *pbdev;
    S390PCIIOMMU *iommu;
    S390IOTLBEntry entry;
    hwaddr start, end;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
        return 0;
    }

    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 4, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    start = env->regs[r2];
    end = start + env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("rpcit no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_MOD_ST_ERROR_RECOVER);
        return 0;
    default:
        break;
    }

    iommu = pbdev->iommu;
    if (!iommu->g_iota) {
        error = ERR_EVENT_INVALAS;
        goto err;
    }

    if (end < iommu->pba || start > iommu->pal) {
        error = ERR_EVENT_OORANGE;
        goto err;
    }

    while (start < end) {
        error = s390_guest_io_table_walk(iommu->g_iota, start, &entry);
        if (error) {
            break;
        }

        start += entry.len;
        while (entry.iova < start && entry.iova < end) {
            s390_pci_update_iotlb(iommu, &entry);
            entry.iova += PAGE_SIZE;
            entry.translated_addr += PAGE_SIZE;
        }
    }
err:
    if (error) {
        pbdev->state = ZPCI_FS_ERROR;
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_FUNC_IN_ERR);
        s390_pci_generate_error_event(error, pbdev->fh, pbdev->fid, start, 0);
    } else {
        setcc(cpu, ZPCI_PCI_LS_OK);
    }
    return 0;
}
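
/*
 * PCISTB (PCI Store Block): copy a block of up to maxstbl bytes from
 * guest memory to a PCI BAR using aligned 8-byte stores.
 */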

int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
                        uint8_t ar, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    MemoryRegion *mr;
    MemTxResult result;
    uint64_t offset;
    int i;
    uint32_t fh;
    uint8_t pcias;
    uint8_t len;
    uint8_t buffer[128];

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    pcias = (env->regs[r1] >> 16) & 0xf;
    len = env->regs[r1] & 0xff;
    offset = env->regs[r3];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    if (pcias > ZPCI_IO_BAR_MAX) {
        DPRINTF("pcistb invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    /* Verify the address, offset and length */
    /* offset must be a multiple of 8 */
    if (offset % 8) {
        goto specification_error;
    }
    /* Length must be greater than 8, a multiple of 8 */
    /* and not greater than maxstbl */
    if ((len <= 8) || (len % 8) || (len > pbdev->maxstbl)) {
        goto specification_error;
    }
    /* Do not cross a 4K-byte boundary */
    if (((offset & 0xfff) + len) > 0x1000) {
        goto specification_error;
    }
    /* Guest address must be double word aligned */
    if (gaddr & 0x07UL) {
        goto specification_error;
    }

    mr = pbdev->pdev->io_regions[pcias].memory;
    mr = s390_get_subregion(mr, offset, len);
    offset -= mr->addr;

    if (!memory_region_access_valid(mr, offset, len, true,
                                    MEMTXATTRS_UNSPECIFIED)) {
        s390_program_interrupt(env, PGM_OPERAND, 6, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    for (i = 0; i < len / 8; i++) {
        result = memory_region_dispatch_write(mr, offset + i * 8,
                                              ldq_p(buffer + i * 8), 8,
                                              MEMTXATTRS_UNSPECIFIED);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, 6, ra);
            return 0;
        }
    }

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;

specification_error:
    s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
    return 0;
}
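
/*
 * Register adapter interrupts for a function from the guest FIB: map the
 * summary and local indicator areas and record the adapter routing
 * information (ISC, NOI, indicator offsets) taken from the FIB.
 */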

static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    int ret, len;
    uint8_t isc = FIB_DATA_ISC(ldl_p(&fib.data));

    pbdev->routes.adapter.adapter_id = css_get_adapter_id(
                                       CSS_IO_ADAPTER_PCI, isc);
    pbdev->summary_ind = get_indicator(ldq_p(&fib.aisb), sizeof(uint64_t));
    len = BITS_TO_LONGS(FIB_DATA_NOI(ldl_p(&fib.data))) * sizeof(unsigned long);
    pbdev->indicator = get_indicator(ldq_p(&fib.aibv), len);

    ret = map_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    if (ret) {
        goto out;
    }

    ret = map_indicator(&pbdev->routes.adapter, pbdev->indicator);
    if (ret) {
        goto out;
    }

    pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb);
    pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data));
    pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv);
    pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data));
    pbdev->isc = isc;
    pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data));
    pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data));

    DPRINTF("reg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
out:
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);
    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;
    return ret;
}

int pci_dereg_irqs(S390PCIBusDevice *pbdev)
{
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);

    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;
    pbdev->routes.adapter.summary_addr = 0;
    pbdev->routes.adapter.summary_offset = 0;
    pbdev->routes.adapter.ind_addr = 0;
    pbdev->routes.adapter.ind_offset = 0;

    DPRINTF("dereg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
}

static int reg_ioat(CPUS390XState *env, S390PCIIOMMU *iommu, ZpciFib fib,
                    uintptr_t ra)
{
    uint64_t pba = ldq_p(&fib.pba);
    uint64_t pal = ldq_p(&fib.pal);
    uint64_t g_iota = ldq_p(&fib.iota);
    uint8_t dt = (g_iota >> 2) & 0x7;
    uint8_t t = (g_iota >> 11) & 0x1;

    if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) {
        s390_program_interrupt(env, PGM_OPERAND, 6, ra);
        return -EINVAL;
    }

    /* currently we only support designation type 1 with translation */
    if (!(dt == ZPCI_IOTA_RTTO && t)) {
        error_report("unsupported ioat dt %d t %d", dt, t);
        s390_program_interrupt(env, PGM_OPERAND, 6, ra);
        return -EINVAL;
    }

    iommu->pba = pba;
    iommu->pal = pal;
    iommu->g_iota = g_iota;

    s390_pci_iommu_enable(iommu);

    return 0;
}

void pci_dereg_ioat(S390PCIIOMMU *iommu)
{
    s390_pci_iommu_disable(iommu);
    iommu->pba = 0;
    iommu->pal = 0;
    iommu->g_iota = 0;
}
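
/*
 * MPCIFC (Modify PCI Function Controls): dispatch on the operation
 * control in r1 to register or deregister interrupts and I/O address
 * translation, reset the error/blocked state, or set the function
 * measurement block address.
 */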

int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
                        uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint8_t oc, dmaas;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
        return 0;
    }

    oc = env->regs[r1] & 0xff;
    dmaas = (env->regs[r1] >> 16) & 0xff;
    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("mpcifc no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    default:
        break;
    }

    if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    if (fib.fmt != 0) {
        s390_program_interrupt(env, PGM_OPERAND, 6, ra);
        return 0;
    }

    switch (oc) {
    case ZPCI_MOD_FC_REG_INT:
        if (pbdev->summary_ind) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_irqs(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_RES_NOT_AVAIL);
        }
        break;
    case ZPCI_MOD_FC_DEREG_INT:
        if (!pbdev->summary_ind) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_irqs(pbdev);
        }
        break;
    case ZPCI_MOD_FC_REG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_ioat(env, pbdev->iommu, fib, ra)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
        }
        break;
    case ZPCI_MOD_FC_DEREG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev->iommu);
        }
        break;
    case ZPCI_MOD_FC_REREG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev->iommu);
            if (reg_ioat(env, pbdev->iommu, fib, ra)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
            }
        }
        break;
    case ZPCI_MOD_FC_RESET_ERROR:
        switch (pbdev->state) {
        case ZPCI_FS_BLOCKED:
        case ZPCI_FS_ERROR:
            pbdev->state = ZPCI_FS_ENABLED;
            break;
        default:
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        }
        break;
    case ZPCI_MOD_FC_RESET_BLOCK:
        switch (pbdev->state) {
        case ZPCI_FS_ERROR:
            pbdev->state = ZPCI_FS_BLOCKED;
            break;
        default:
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        }
        break;
    case ZPCI_MOD_FC_SET_MEASURE:
        pbdev->fmb_addr = ldq_p(&fib.fmb_addr);
        break;
    default:
        s390_program_interrupt(&cpu->env, PGM_OPERAND, 6, ra);
        cc = ZPCI_PCI_LS_ERR;
    }

    setcc(cpu, cc);
    return 0;
}
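
/*
 * STPCIFC (Store PCI Function Controls): build a FIB describing the
 * current function state (DMA ranges, indicator addresses, interrupt
 * routing) and store it at the guest address in fiba.
 */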

int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
                         uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint8_t dmaas;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint32_t data;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, 6, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    dmaas = (env->regs[r1] >> 16) & 0xff;

    if (dmaas) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_INVAL_DMAAS);
        return 0;
    }

    if (fiba & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, 6, ra);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_idx(s390_get_phb(), fh & FH_MASK_INDEX);
    if (!pbdev) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    memset(&fib, 0, sizeof(fib));

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_DISABLED:
        if (fh & FH_MASK_ENABLE) {
            setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
            return 0;
        }
        goto out;
    /* BLOCKED bit is set to one coincident with the setting of ERROR bit.
     * FH Enabled bit is set to one in states of ENABLED, BLOCKED or ERROR. */
    case ZPCI_FS_ERROR:
        fib.fc |= 0x20;
    case ZPCI_FS_BLOCKED:
        fib.fc |= 0x40;
    case ZPCI_FS_ENABLED:
        fib.fc |= 0x80;
        if (pbdev->iommu->enabled) {
            fib.fc |= 0x10;
        }
        if (!(fh & FH_MASK_ENABLE)) {
            env->regs[r1] |= 1ULL << 63;
        }
        break;
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_PERM_ERROR);
        return 0;
    }

    stq_p(&fib.pba, pbdev->iommu->pba);
    stq_p(&fib.pal, pbdev->iommu->pal);
    stq_p(&fib.iota, pbdev->iommu->g_iota);
    stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
    stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
    stq_p(&fib.fmb_addr, pbdev->fmb_addr);

    data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
           ((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
           ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
    stl_p(&fib.data, data);

out:
    if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    setcc(cpu, cc);
    return 0;
}