/*
 * s390 PCI instructions
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */
14 #include "qemu/osdep.h"
16 #include "s390-pci-inst.h"
17 #include "s390-pci-bus.h"
18 #include "exec/memop.h"
19 #include "exec/memory-internal.h"
20 #include "qemu/error-report.h"
21 #include "sysemu/hw_accel.h"
22 #include "hw/s390x/tod.h"
24 #ifndef DEBUG_S390PCI_INST
25 #define DEBUG_S390PCI_INST 0
28 #define DPRINTF(fmt, ...) \
30 if (DEBUG_S390PCI_INST) { \
31 fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__); \
35 static void s390_set_status_code(CPUS390XState
*env
,
36 uint8_t r
, uint64_t status_code
)
38 env
->regs
[r
] &= ~0xff000000ULL
;
39 env
->regs
[r
] |= (status_code
& 0xff) << 24;
42 static int list_pci(ClpReqRspListPci
*rrb
, uint8_t *cc
)
44 S390PCIBusDevice
*pbdev
= NULL
;
45 S390pciState
*s
= s390_get_phb();
46 uint32_t res_code
, initial_l2
, g_l2
;
48 uint64_t resume_token
;
51 if (lduw_p(&rrb
->request
.hdr
.len
) != 32) {
52 res_code
= CLP_RC_LEN
;
57 if ((ldl_p(&rrb
->request
.fmt
) & CLP_MASK_FMT
) != 0) {
58 res_code
= CLP_RC_FMT
;
63 if ((ldl_p(&rrb
->request
.fmt
) & ~CLP_MASK_FMT
) != 0 ||
64 ldq_p(&rrb
->request
.reserved1
) != 0) {
65 res_code
= CLP_RC_RESNOT0
;
70 resume_token
= ldq_p(&rrb
->request
.resume_token
);
73 pbdev
= s390_pci_find_dev_by_idx(s
, resume_token
);
75 res_code
= CLP_RC_LISTPCI_BADRT
;
80 pbdev
= s390_pci_find_next_avail_dev(s
, NULL
);
83 if (lduw_p(&rrb
->response
.hdr
.len
) < 48) {
89 initial_l2
= lduw_p(&rrb
->response
.hdr
.len
);
90 if ((initial_l2
- LIST_PCI_HDR_LEN
) % sizeof(ClpFhListEntry
)
92 res_code
= CLP_RC_LEN
;
98 stl_p(&rrb
->response
.fmt
, 0);
99 stq_p(&rrb
->response
.reserved1
, 0);
100 stl_p(&rrb
->response
.mdd
, FH_MASK_SHM
);
101 stw_p(&rrb
->response
.max_fn
, PCI_MAX_FUNCTIONS
);
102 rrb
->response
.flags
= UID_CHECKING_ENABLED
;
103 rrb
->response
.entry_size
= sizeof(ClpFhListEntry
);
106 g_l2
= LIST_PCI_HDR_LEN
;
107 while (g_l2
< initial_l2
&& pbdev
) {
108 stw_p(&rrb
->response
.fh_list
[i
].device_id
,
109 pci_get_word(pbdev
->pdev
->config
+ PCI_DEVICE_ID
));
110 stw_p(&rrb
->response
.fh_list
[i
].vendor_id
,
111 pci_get_word(pbdev
->pdev
->config
+ PCI_VENDOR_ID
));
112 /* Ignore RESERVED devices. */
113 stl_p(&rrb
->response
.fh_list
[i
].config
,
114 pbdev
->state
== ZPCI_FS_STANDBY
? 0 : 1 << 31);
115 stl_p(&rrb
->response
.fh_list
[i
].fid
, pbdev
->fid
);
116 stl_p(&rrb
->response
.fh_list
[i
].fh
, pbdev
->fh
);
118 g_l2
+= sizeof(ClpFhListEntry
);
119 /* Add endian check for DPRINTF? */
120 DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n",
122 lduw_p(&rrb
->response
.fh_list
[i
].vendor_id
),
123 lduw_p(&rrb
->response
.fh_list
[i
].device_id
),
124 ldl_p(&rrb
->response
.fh_list
[i
].fid
),
125 ldl_p(&rrb
->response
.fh_list
[i
].fh
));
126 pbdev
= s390_pci_find_next_avail_dev(s
, pbdev
);
133 resume_token
= pbdev
->fh
& FH_MASK_INDEX
;
135 stq_p(&rrb
->response
.resume_token
, resume_token
);
136 stw_p(&rrb
->response
.hdr
.len
, g_l2
);
137 stw_p(&rrb
->response
.hdr
.rsp
, CLP_RC_OK
);
140 DPRINTF("list pci failed rc 0x%x\n", rc
);
141 stw_p(&rrb
->response
.hdr
.rsp
, res_code
);
146 int clp_service_call(S390CPU
*cpu
, uint8_t r2
, uintptr_t ra
)
150 S390PCIBusDevice
*pbdev
;
153 uint8_t buffer
[4096 * 2];
155 CPUS390XState
*env
= &cpu
->env
;
156 S390pciState
*s
= s390_get_phb();
159 if (env
->psw
.mask
& PSW_MASK_PSTATE
) {
160 s390_program_interrupt(env
, PGM_PRIVILEGED
, 4, ra
);
164 if (s390_cpu_virt_mem_read(cpu
, env
->regs
[r2
], r2
, buffer
, sizeof(*reqh
))) {
165 s390_cpu_virt_mem_handle_exc(cpu
, ra
);
168 reqh
= (ClpReqHdr
*)buffer
;
169 req_len
= lduw_p(&reqh
->len
);
170 if (req_len
< 16 || req_len
> 8184 || (req_len
% 8 != 0)) {
171 s390_program_interrupt(env
, PGM_OPERAND
, 4, ra
);
175 if (s390_cpu_virt_mem_read(cpu
, env
->regs
[r2
], r2
, buffer
,
176 req_len
+ sizeof(*resh
))) {
177 s390_cpu_virt_mem_handle_exc(cpu
, ra
);
180 resh
= (ClpRspHdr
*)(buffer
+ req_len
);
181 res_len
= lduw_p(&resh
->len
);
182 if (res_len
< 8 || res_len
> 8176 || (res_len
% 8 != 0)) {
183 s390_program_interrupt(env
, PGM_OPERAND
, 4, ra
);
186 if ((req_len
+ res_len
) > 8192) {
187 s390_program_interrupt(env
, PGM_OPERAND
, 4, ra
);
191 if (s390_cpu_virt_mem_read(cpu
, env
->regs
[r2
], r2
, buffer
,
192 req_len
+ res_len
)) {
193 s390_cpu_virt_mem_handle_exc(cpu
, ra
);
198 stw_p(&resh
->rsp
, CLP_RC_LEN
);
202 switch (lduw_p(&reqh
->cmd
)) {
204 ClpReqRspListPci
*rrb
= (ClpReqRspListPci
*)buffer
;
208 case CLP_SET_PCI_FN
: {
209 ClpReqSetPci
*reqsetpci
= (ClpReqSetPci
*)reqh
;
210 ClpRspSetPci
*ressetpci
= (ClpRspSetPci
*)resh
;
212 pbdev
= s390_pci_find_dev_by_fh(s
, ldl_p(&reqsetpci
->fh
));
214 stw_p(&ressetpci
->hdr
.rsp
, CLP_RC_SETPCIFN_FH
);
218 switch (reqsetpci
->oc
) {
219 case CLP_SET_ENABLE_PCI_FN
:
220 switch (reqsetpci
->ndas
) {
222 stw_p(&ressetpci
->hdr
.rsp
, CLP_RC_SETPCIFN_DMAAS
);
227 stw_p(&ressetpci
->hdr
.rsp
, CLP_RC_SETPCIFN_RES
);
231 if (pbdev
->fh
& FH_MASK_ENABLE
) {
232 stw_p(&ressetpci
->hdr
.rsp
, CLP_RC_SETPCIFN_FHOP
);
236 pbdev
->fh
|= FH_MASK_ENABLE
;
237 pbdev
->state
= ZPCI_FS_ENABLED
;
238 stl_p(&ressetpci
->fh
, pbdev
->fh
);
239 stw_p(&ressetpci
->hdr
.rsp
, CLP_RC_OK
);
241 case CLP_SET_DISABLE_PCI_FN
:
242 if (!(pbdev
->fh
& FH_MASK_ENABLE
)) {
243 stw_p(&ressetpci
->hdr
.rsp
, CLP_RC_SETPCIFN_FHOP
);
246 device_reset(DEVICE(pbdev
));
247 pbdev
->fh
&= ~FH_MASK_ENABLE
;
248 pbdev
->state
= ZPCI_FS_DISABLED
;
249 stl_p(&ressetpci
->fh
, pbdev
->fh
);
250 stw_p(&ressetpci
->hdr
.rsp
, CLP_RC_OK
);
253 DPRINTF("unknown set pci command\n");
254 stw_p(&ressetpci
->hdr
.rsp
, CLP_RC_SETPCIFN_FHOP
);
259 case CLP_QUERY_PCI_FN
: {
260 ClpReqQueryPci
*reqquery
= (ClpReqQueryPci
*)reqh
;
261 ClpRspQueryPci
*resquery
= (ClpRspQueryPci
*)resh
;
263 pbdev
= s390_pci_find_dev_by_fh(s
, ldl_p(&reqquery
->fh
));
265 DPRINTF("query pci no pci dev\n");
266 stw_p(&resquery
->hdr
.rsp
, CLP_RC_SETPCIFN_FH
);
270 for (i
= 0; i
< PCI_BAR_COUNT
; i
++) {
271 uint32_t data
= pci_get_long(pbdev
->pdev
->config
+
272 PCI_BASE_ADDRESS_0
+ (i
* 4));
274 stl_p(&resquery
->bar
[i
], data
);
275 resquery
->bar_size
[i
] = pbdev
->pdev
->io_regions
[i
].size
?
276 ctz64(pbdev
->pdev
->io_regions
[i
].size
) : 0;
277 DPRINTF("bar %d addr 0x%x size 0x%" PRIx64
"barsize 0x%x\n", i
,
278 ldl_p(&resquery
->bar
[i
]),
279 pbdev
->pdev
->io_regions
[i
].size
,
280 resquery
->bar_size
[i
]);
283 stq_p(&resquery
->sdma
, ZPCI_SDMA_ADDR
);
284 stq_p(&resquery
->edma
, ZPCI_EDMA_ADDR
);
285 stl_p(&resquery
->fid
, pbdev
->fid
);
286 stw_p(&resquery
->pchid
, 0);
287 stw_p(&resquery
->ug
, 1);
288 stl_p(&resquery
->uid
, pbdev
->uid
);
289 stw_p(&resquery
->hdr
.rsp
, CLP_RC_OK
);
292 case CLP_QUERY_PCI_FNGRP
: {
293 ClpRspQueryPciGrp
*resgrp
= (ClpRspQueryPciGrp
*)resh
;
295 stq_p(&resgrp
->dasm
, 0);
296 stq_p(&resgrp
->msia
, ZPCI_MSI_ADDR
);
297 stw_p(&resgrp
->mui
, DEFAULT_MUI
);
298 stw_p(&resgrp
->i
, 128);
299 stw_p(&resgrp
->maxstbl
, 128);
302 stw_p(&resgrp
->hdr
.rsp
, CLP_RC_OK
);
306 DPRINTF("unknown clp command\n");
307 stw_p(&resh
->rsp
, CLP_RC_CMD
);
312 if (s390_cpu_virt_mem_write(cpu
, env
->regs
[r2
], r2
, buffer
,
313 req_len
+ res_len
)) {
314 s390_cpu_virt_mem_handle_exc(cpu
, ra
);
/**
 * Swap data contained in s390x big endian registers to little endian
 * PCI bars.
 *
 * @ptr: a pointer to a uint64_t data field
 * @len: the length of the valid data, must be 1,2,4 or 8
 */
static int zpci_endian_swap(uint64_t *ptr, uint8_t len)
{
    uint64_t data = *ptr;

    /* Only naturally sized accesses are defined; anything else is an
     * operand error reported to the caller via the non-zero return. */
    switch (len) {
    case 1:
        /* single byte: nothing to swap */
        break;
    case 2:
        data = bswap16(data);
        break;
    case 4:
        data = bswap32(data);
        break;
    case 8:
        data = bswap64(data);
        break;
    default:
        return -EINVAL;
    }
    *ptr = data;
    return 0;
}
351 static MemoryRegion
*s390_get_subregion(MemoryRegion
*mr
, uint64_t offset
,
354 MemoryRegion
*subregion
;
355 uint64_t subregion_size
;
357 QTAILQ_FOREACH(subregion
, &mr
->subregions
, subregions_link
) {
358 subregion_size
= int128_get64(subregion
->size
);
359 if ((offset
>= subregion
->addr
) &&
360 (offset
+ len
) <= (subregion
->addr
+ subregion_size
)) {
368 static MemTxResult
zpci_read_bar(S390PCIBusDevice
*pbdev
, uint8_t pcias
,
369 uint64_t offset
, uint64_t *data
, uint8_t len
)
373 mr
= pbdev
->pdev
->io_regions
[pcias
].memory
;
374 mr
= s390_get_subregion(mr
, offset
, len
);
376 return memory_region_dispatch_read(mr
, offset
, data
,
377 size_memop(len
) | MO_BE
,
378 MEMTXATTRS_UNSPECIFIED
);
381 int pcilg_service_call(S390CPU
*cpu
, uint8_t r1
, uint8_t r2
, uintptr_t ra
)
383 CPUS390XState
*env
= &cpu
->env
;
384 S390PCIBusDevice
*pbdev
;
392 if (env
->psw
.mask
& PSW_MASK_PSTATE
) {
393 s390_program_interrupt(env
, PGM_PRIVILEGED
, 4, ra
);
398 s390_program_interrupt(env
, PGM_SPECIFICATION
, 4, ra
);
402 fh
= env
->regs
[r2
] >> 32;
403 pcias
= (env
->regs
[r2
] >> 16) & 0xf;
404 len
= env
->regs
[r2
] & 0xf;
405 offset
= env
->regs
[r2
+ 1];
407 if (!(fh
& FH_MASK_ENABLE
)) {
408 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
412 pbdev
= s390_pci_find_dev_by_fh(s390_get_phb(), fh
);
414 DPRINTF("pcilg no pci dev\n");
415 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
419 switch (pbdev
->state
) {
420 case ZPCI_FS_PERMANENT_ERROR
:
422 setcc(cpu
, ZPCI_PCI_LS_ERR
);
423 s390_set_status_code(env
, r2
, ZPCI_PCI_ST_BLOCKED
);
430 case ZPCI_IO_BAR_MIN
...ZPCI_IO_BAR_MAX
:
431 if (!len
|| (len
> (8 - (offset
& 0x7)))) {
432 s390_program_interrupt(env
, PGM_OPERAND
, 4, ra
);
435 result
= zpci_read_bar(pbdev
, pcias
, offset
, &data
, len
);
436 if (result
!= MEMTX_OK
) {
437 s390_program_interrupt(env
, PGM_OPERAND
, 4, ra
);
441 case ZPCI_CONFIG_BAR
:
442 if (!len
|| (len
> (4 - (offset
& 0x3))) || len
== 3) {
443 s390_program_interrupt(env
, PGM_OPERAND
, 4, ra
);
446 data
= pci_host_config_read_common(
447 pbdev
->pdev
, offset
, pci_config_size(pbdev
->pdev
), len
);
449 if (zpci_endian_swap(&data
, len
)) {
450 s390_program_interrupt(env
, PGM_OPERAND
, 4, ra
);
455 DPRINTF("pcilg invalid space\n");
456 setcc(cpu
, ZPCI_PCI_LS_ERR
);
457 s390_set_status_code(env
, r2
, ZPCI_PCI_ST_INVAL_AS
);
461 pbdev
->fmb
.counter
[ZPCI_FMB_CNT_LD
]++;
463 env
->regs
[r1
] = data
;
464 setcc(cpu
, ZPCI_PCI_LS_OK
);
468 static MemTxResult
zpci_write_bar(S390PCIBusDevice
*pbdev
, uint8_t pcias
,
469 uint64_t offset
, uint64_t data
, uint8_t len
)
473 mr
= pbdev
->pdev
->io_regions
[pcias
].memory
;
474 mr
= s390_get_subregion(mr
, offset
, len
);
476 return memory_region_dispatch_write(mr
, offset
, data
,
477 size_memop(len
) | MO_BE
,
478 MEMTXATTRS_UNSPECIFIED
);
481 int pcistg_service_call(S390CPU
*cpu
, uint8_t r1
, uint8_t r2
, uintptr_t ra
)
483 CPUS390XState
*env
= &cpu
->env
;
484 uint64_t offset
, data
;
485 S390PCIBusDevice
*pbdev
;
491 if (env
->psw
.mask
& PSW_MASK_PSTATE
) {
492 s390_program_interrupt(env
, PGM_PRIVILEGED
, 4, ra
);
497 s390_program_interrupt(env
, PGM_SPECIFICATION
, 4, ra
);
501 fh
= env
->regs
[r2
] >> 32;
502 pcias
= (env
->regs
[r2
] >> 16) & 0xf;
503 len
= env
->regs
[r2
] & 0xf;
504 offset
= env
->regs
[r2
+ 1];
505 data
= env
->regs
[r1
];
507 if (!(fh
& FH_MASK_ENABLE
)) {
508 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
512 pbdev
= s390_pci_find_dev_by_fh(s390_get_phb(), fh
);
514 DPRINTF("pcistg no pci dev\n");
515 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
519 switch (pbdev
->state
) {
520 /* ZPCI_FS_RESERVED, ZPCI_FS_STANDBY and ZPCI_FS_DISABLED
521 * are already covered by the FH_MASK_ENABLE check above
523 case ZPCI_FS_PERMANENT_ERROR
:
525 setcc(cpu
, ZPCI_PCI_LS_ERR
);
526 s390_set_status_code(env
, r2
, ZPCI_PCI_ST_BLOCKED
);
533 /* A ZPCI PCI card may use any BAR from BAR 0 to BAR 5 */
534 case ZPCI_IO_BAR_MIN
...ZPCI_IO_BAR_MAX
:
536 * A length of 0 is invalid and length should not cross a double word
538 if (!len
|| (len
> (8 - (offset
& 0x7)))) {
539 s390_program_interrupt(env
, PGM_OPERAND
, 4, ra
);
543 result
= zpci_write_bar(pbdev
, pcias
, offset
, data
, len
);
544 if (result
!= MEMTX_OK
) {
545 s390_program_interrupt(env
, PGM_OPERAND
, 4, ra
);
549 case ZPCI_CONFIG_BAR
:
550 /* ZPCI uses the pseudo BAR number 15 as configuration space */
551 /* possible access lengths are 1,2,4 and must not cross a word */
552 if (!len
|| (len
> (4 - (offset
& 0x3))) || len
== 3) {
553 s390_program_interrupt(env
, PGM_OPERAND
, 4, ra
);
556 /* len = 1,2,4 so we do not need to test */
557 zpci_endian_swap(&data
, len
);
558 pci_host_config_write_common(pbdev
->pdev
, offset
,
559 pci_config_size(pbdev
->pdev
),
563 DPRINTF("pcistg invalid space\n");
564 setcc(cpu
, ZPCI_PCI_LS_ERR
);
565 s390_set_status_code(env
, r2
, ZPCI_PCI_ST_INVAL_AS
);
569 pbdev
->fmb
.counter
[ZPCI_FMB_CNT_ST
]++;
571 setcc(cpu
, ZPCI_PCI_LS_OK
);
575 static void s390_pci_update_iotlb(S390PCIIOMMU
*iommu
, S390IOTLBEntry
*entry
)
577 S390IOTLBEntry
*cache
= g_hash_table_lookup(iommu
->iotlb
, &entry
->iova
);
578 IOMMUTLBEntry notify
= {
579 .target_as
= &address_space_memory
,
581 .translated_addr
= entry
->translated_addr
,
583 .addr_mask
= ~PAGE_MASK
,
586 if (entry
->perm
== IOMMU_NONE
) {
590 g_hash_table_remove(iommu
->iotlb
, &entry
->iova
);
593 if (cache
->perm
== entry
->perm
&&
594 cache
->translated_addr
== entry
->translated_addr
) {
598 notify
.perm
= IOMMU_NONE
;
599 memory_region_notify_iommu(&iommu
->iommu_mr
, 0, notify
);
600 notify
.perm
= entry
->perm
;
603 cache
= g_new(S390IOTLBEntry
, 1);
604 cache
->iova
= entry
->iova
;
605 cache
->translated_addr
= entry
->translated_addr
;
606 cache
->len
= PAGE_SIZE
;
607 cache
->perm
= entry
->perm
;
608 g_hash_table_replace(iommu
->iotlb
, &cache
->iova
, cache
);
611 memory_region_notify_iommu(&iommu
->iommu_mr
, 0, notify
);
614 int rpcit_service_call(S390CPU
*cpu
, uint8_t r1
, uint8_t r2
, uintptr_t ra
)
616 CPUS390XState
*env
= &cpu
->env
;
619 S390PCIBusDevice
*pbdev
;
621 S390IOTLBEntry entry
;
624 if (env
->psw
.mask
& PSW_MASK_PSTATE
) {
625 s390_program_interrupt(env
, PGM_PRIVILEGED
, 4, ra
);
630 s390_program_interrupt(env
, PGM_SPECIFICATION
, 4, ra
);
634 fh
= env
->regs
[r1
] >> 32;
635 start
= env
->regs
[r2
];
636 end
= start
+ env
->regs
[r2
+ 1];
638 pbdev
= s390_pci_find_dev_by_fh(s390_get_phb(), fh
);
640 DPRINTF("rpcit no pci dev\n");
641 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
645 switch (pbdev
->state
) {
646 case ZPCI_FS_RESERVED
:
647 case ZPCI_FS_STANDBY
:
648 case ZPCI_FS_DISABLED
:
649 case ZPCI_FS_PERMANENT_ERROR
:
650 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
653 setcc(cpu
, ZPCI_PCI_LS_ERR
);
654 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_ERROR_RECOVER
);
660 iommu
= pbdev
->iommu
;
661 if (!iommu
->g_iota
) {
662 error
= ERR_EVENT_INVALAS
;
666 if (end
< iommu
->pba
|| start
> iommu
->pal
) {
667 error
= ERR_EVENT_OORANGE
;
671 while (start
< end
) {
672 error
= s390_guest_io_table_walk(iommu
->g_iota
, start
, &entry
);
678 while (entry
.iova
< start
&& entry
.iova
< end
) {
679 s390_pci_update_iotlb(iommu
, &entry
);
680 entry
.iova
+= PAGE_SIZE
;
681 entry
.translated_addr
+= PAGE_SIZE
;
686 pbdev
->state
= ZPCI_FS_ERROR
;
687 setcc(cpu
, ZPCI_PCI_LS_ERR
);
688 s390_set_status_code(env
, r1
, ZPCI_PCI_ST_FUNC_IN_ERR
);
689 s390_pci_generate_error_event(error
, pbdev
->fh
, pbdev
->fid
, start
, 0);
691 pbdev
->fmb
.counter
[ZPCI_FMB_CNT_RPCIT
]++;
692 setcc(cpu
, ZPCI_PCI_LS_OK
);
697 int pcistb_service_call(S390CPU
*cpu
, uint8_t r1
, uint8_t r3
, uint64_t gaddr
,
698 uint8_t ar
, uintptr_t ra
)
700 CPUS390XState
*env
= &cpu
->env
;
701 S390PCIBusDevice
*pbdev
;
711 if (env
->psw
.mask
& PSW_MASK_PSTATE
) {
712 s390_program_interrupt(env
, PGM_PRIVILEGED
, 6, ra
);
716 fh
= env
->regs
[r1
] >> 32;
717 pcias
= (env
->regs
[r1
] >> 16) & 0xf;
718 len
= env
->regs
[r1
] & 0xff;
719 offset
= env
->regs
[r3
];
721 if (!(fh
& FH_MASK_ENABLE
)) {
722 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
726 pbdev
= s390_pci_find_dev_by_fh(s390_get_phb(), fh
);
728 DPRINTF("pcistb no pci dev fh 0x%x\n", fh
);
729 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
733 switch (pbdev
->state
) {
734 case ZPCI_FS_PERMANENT_ERROR
:
736 setcc(cpu
, ZPCI_PCI_LS_ERR
);
737 s390_set_status_code(env
, r1
, ZPCI_PCI_ST_BLOCKED
);
743 if (pcias
> ZPCI_IO_BAR_MAX
) {
744 DPRINTF("pcistb invalid space\n");
745 setcc(cpu
, ZPCI_PCI_LS_ERR
);
746 s390_set_status_code(env
, r1
, ZPCI_PCI_ST_INVAL_AS
);
750 /* Verify the address, offset and length */
751 /* offset must be a multiple of 8 */
753 goto specification_error
;
755 /* Length must be greater than 8, a multiple of 8 */
756 /* and not greater than maxstbl */
757 if ((len
<= 8) || (len
% 8) || (len
> pbdev
->maxstbl
)) {
758 goto specification_error
;
760 /* Do not cross a 4K-byte boundary */
761 if (((offset
& 0xfff) + len
) > 0x1000) {
762 goto specification_error
;
764 /* Guest address must be double word aligned */
765 if (gaddr
& 0x07UL
) {
766 goto specification_error
;
769 mr
= pbdev
->pdev
->io_regions
[pcias
].memory
;
770 mr
= s390_get_subregion(mr
, offset
, len
);
773 if (!memory_region_access_valid(mr
, offset
, len
, true,
774 MEMTXATTRS_UNSPECIFIED
)) {
775 s390_program_interrupt(env
, PGM_OPERAND
, 6, ra
);
779 if (s390_cpu_virt_mem_read(cpu
, gaddr
, ar
, buffer
, len
)) {
780 s390_cpu_virt_mem_handle_exc(cpu
, ra
);
784 for (i
= 0; i
< len
/ 8; i
++) {
785 result
= memory_region_dispatch_write(mr
, offset
+ i
* 8,
786 ldq_p(buffer
+ i
* 8),
787 MO_64
, MEMTXATTRS_UNSPECIFIED
);
788 if (result
!= MEMTX_OK
) {
789 s390_program_interrupt(env
, PGM_OPERAND
, 6, ra
);
794 pbdev
->fmb
.counter
[ZPCI_FMB_CNT_STB
]++;
796 setcc(cpu
, ZPCI_PCI_LS_OK
);
800 s390_program_interrupt(env
, PGM_SPECIFICATION
, 6, ra
);
804 static int reg_irqs(CPUS390XState
*env
, S390PCIBusDevice
*pbdev
, ZpciFib fib
)
807 uint8_t isc
= FIB_DATA_ISC(ldl_p(&fib
.data
));
809 pbdev
->routes
.adapter
.adapter_id
= css_get_adapter_id(
810 CSS_IO_ADAPTER_PCI
, isc
);
811 pbdev
->summary_ind
= get_indicator(ldq_p(&fib
.aisb
), sizeof(uint64_t));
812 len
= BITS_TO_LONGS(FIB_DATA_NOI(ldl_p(&fib
.data
))) * sizeof(unsigned long);
813 pbdev
->indicator
= get_indicator(ldq_p(&fib
.aibv
), len
);
815 ret
= map_indicator(&pbdev
->routes
.adapter
, pbdev
->summary_ind
);
820 ret
= map_indicator(&pbdev
->routes
.adapter
, pbdev
->indicator
);
825 pbdev
->routes
.adapter
.summary_addr
= ldq_p(&fib
.aisb
);
826 pbdev
->routes
.adapter
.summary_offset
= FIB_DATA_AISBO(ldl_p(&fib
.data
));
827 pbdev
->routes
.adapter
.ind_addr
= ldq_p(&fib
.aibv
);
828 pbdev
->routes
.adapter
.ind_offset
= FIB_DATA_AIBVO(ldl_p(&fib
.data
));
830 pbdev
->noi
= FIB_DATA_NOI(ldl_p(&fib
.data
));
831 pbdev
->sum
= FIB_DATA_SUM(ldl_p(&fib
.data
));
833 DPRINTF("reg_irqs adapter id %d\n", pbdev
->routes
.adapter
.adapter_id
);
836 release_indicator(&pbdev
->routes
.adapter
, pbdev
->summary_ind
);
837 release_indicator(&pbdev
->routes
.adapter
, pbdev
->indicator
);
838 pbdev
->summary_ind
= NULL
;
839 pbdev
->indicator
= NULL
;
843 int pci_dereg_irqs(S390PCIBusDevice
*pbdev
)
845 release_indicator(&pbdev
->routes
.adapter
, pbdev
->summary_ind
);
846 release_indicator(&pbdev
->routes
.adapter
, pbdev
->indicator
);
848 pbdev
->summary_ind
= NULL
;
849 pbdev
->indicator
= NULL
;
850 pbdev
->routes
.adapter
.summary_addr
= 0;
851 pbdev
->routes
.adapter
.summary_offset
= 0;
852 pbdev
->routes
.adapter
.ind_addr
= 0;
853 pbdev
->routes
.adapter
.ind_offset
= 0;
858 DPRINTF("dereg_irqs adapter id %d\n", pbdev
->routes
.adapter
.adapter_id
);
862 static int reg_ioat(CPUS390XState
*env
, S390PCIIOMMU
*iommu
, ZpciFib fib
,
865 uint64_t pba
= ldq_p(&fib
.pba
);
866 uint64_t pal
= ldq_p(&fib
.pal
);
867 uint64_t g_iota
= ldq_p(&fib
.iota
);
868 uint8_t dt
= (g_iota
>> 2) & 0x7;
869 uint8_t t
= (g_iota
>> 11) & 0x1;
873 if (pba
> pal
|| pba
< ZPCI_SDMA_ADDR
|| pal
> ZPCI_EDMA_ADDR
) {
874 s390_program_interrupt(env
, PGM_OPERAND
, 6, ra
);
878 /* currently we only support designation type 1 with translation */
879 if (!(dt
== ZPCI_IOTA_RTTO
&& t
)) {
880 error_report("unsupported ioat dt %d t %d", dt
, t
);
881 s390_program_interrupt(env
, PGM_OPERAND
, 6, ra
);
887 iommu
->g_iota
= g_iota
;
889 s390_pci_iommu_enable(iommu
);
894 void pci_dereg_ioat(S390PCIIOMMU
*iommu
)
896 s390_pci_iommu_disable(iommu
);
902 void fmb_timer_free(S390PCIBusDevice
*pbdev
)
904 if (pbdev
->fmb_timer
) {
905 timer_del(pbdev
->fmb_timer
);
906 timer_free(pbdev
->fmb_timer
);
907 pbdev
->fmb_timer
= NULL
;
910 memset(&pbdev
->fmb
, 0, sizeof(ZpciFmb
));
913 static int fmb_do_update(S390PCIBusDevice
*pbdev
, int offset
, uint64_t val
,
917 uint64_t dst
= pbdev
->fmb_addr
+ offset
;
921 address_space_stq_be(&address_space_memory
, dst
, val
,
922 MEMTXATTRS_UNSPECIFIED
,
926 address_space_stl_be(&address_space_memory
, dst
, val
,
927 MEMTXATTRS_UNSPECIFIED
,
931 address_space_stw_be(&address_space_memory
, dst
, val
,
932 MEMTXATTRS_UNSPECIFIED
,
936 address_space_stb(&address_space_memory
, dst
, val
,
937 MEMTXATTRS_UNSPECIFIED
,
944 if (ret
!= MEMTX_OK
) {
945 s390_pci_generate_error_event(ERR_EVENT_FMBA
, pbdev
->fh
, pbdev
->fid
,
947 fmb_timer_free(pbdev
);
953 static void fmb_update(void *opaque
)
955 S390PCIBusDevice
*pbdev
= opaque
;
956 int64_t t
= qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
);
960 pbdev
->fmb
.last_update
*= 2;
961 pbdev
->fmb
.last_update
|= UPDATE_U_BIT
;
962 if (fmb_do_update(pbdev
, offsetof(ZpciFmb
, last_update
),
963 pbdev
->fmb
.last_update
,
964 sizeof(pbdev
->fmb
.last_update
))) {
968 /* Update FMB sample count */
969 if (fmb_do_update(pbdev
, offsetof(ZpciFmb
, sample
),
971 sizeof(pbdev
->fmb
.sample
))) {
975 /* Update FMB counters */
976 for (i
= 0; i
< ZPCI_FMB_CNT_MAX
; i
++) {
977 if (fmb_do_update(pbdev
, offsetof(ZpciFmb
, counter
[i
]),
978 pbdev
->fmb
.counter
[i
],
979 sizeof(pbdev
->fmb
.counter
[0]))) {
984 /* Clear U bit and update the time */
985 pbdev
->fmb
.last_update
= time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
));
986 pbdev
->fmb
.last_update
*= 2;
987 if (fmb_do_update(pbdev
, offsetof(ZpciFmb
, last_update
),
988 pbdev
->fmb
.last_update
,
989 sizeof(pbdev
->fmb
.last_update
))) {
992 timer_mod(pbdev
->fmb_timer
, t
+ DEFAULT_MUI
);
995 int mpcifc_service_call(S390CPU
*cpu
, uint8_t r1
, uint64_t fiba
, uint8_t ar
,
998 CPUS390XState
*env
= &cpu
->env
;
1002 S390PCIBusDevice
*pbdev
;
1003 uint64_t cc
= ZPCI_PCI_LS_OK
;
1005 if (env
->psw
.mask
& PSW_MASK_PSTATE
) {
1006 s390_program_interrupt(env
, PGM_PRIVILEGED
, 6, ra
);
1010 oc
= env
->regs
[r1
] & 0xff;
1011 dmaas
= (env
->regs
[r1
] >> 16) & 0xff;
1012 fh
= env
->regs
[r1
] >> 32;
1015 s390_program_interrupt(env
, PGM_SPECIFICATION
, 6, ra
);
1019 pbdev
= s390_pci_find_dev_by_fh(s390_get_phb(), fh
);
1021 DPRINTF("mpcifc no pci dev fh 0x%x\n", fh
);
1022 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
1026 switch (pbdev
->state
) {
1027 case ZPCI_FS_RESERVED
:
1028 case ZPCI_FS_STANDBY
:
1029 case ZPCI_FS_DISABLED
:
1030 case ZPCI_FS_PERMANENT_ERROR
:
1031 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
1037 if (s390_cpu_virt_mem_read(cpu
, fiba
, ar
, (uint8_t *)&fib
, sizeof(fib
))) {
1038 s390_cpu_virt_mem_handle_exc(cpu
, ra
);
1043 s390_program_interrupt(env
, PGM_OPERAND
, 6, ra
);
1048 case ZPCI_MOD_FC_REG_INT
:
1049 if (pbdev
->summary_ind
) {
1050 cc
= ZPCI_PCI_LS_ERR
;
1051 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_SEQUENCE
);
1052 } else if (reg_irqs(env
, pbdev
, fib
)) {
1053 cc
= ZPCI_PCI_LS_ERR
;
1054 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_RES_NOT_AVAIL
);
1057 case ZPCI_MOD_FC_DEREG_INT
:
1058 if (!pbdev
->summary_ind
) {
1059 cc
= ZPCI_PCI_LS_ERR
;
1060 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_SEQUENCE
);
1062 pci_dereg_irqs(pbdev
);
1065 case ZPCI_MOD_FC_REG_IOAT
:
1067 cc
= ZPCI_PCI_LS_ERR
;
1068 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_DMAAS_INVAL
);
1069 } else if (pbdev
->iommu
->enabled
) {
1070 cc
= ZPCI_PCI_LS_ERR
;
1071 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_SEQUENCE
);
1072 } else if (reg_ioat(env
, pbdev
->iommu
, fib
, ra
)) {
1073 cc
= ZPCI_PCI_LS_ERR
;
1074 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_INSUF_RES
);
1077 case ZPCI_MOD_FC_DEREG_IOAT
:
1079 cc
= ZPCI_PCI_LS_ERR
;
1080 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_DMAAS_INVAL
);
1081 } else if (!pbdev
->iommu
->enabled
) {
1082 cc
= ZPCI_PCI_LS_ERR
;
1083 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_SEQUENCE
);
1085 pci_dereg_ioat(pbdev
->iommu
);
1088 case ZPCI_MOD_FC_REREG_IOAT
:
1090 cc
= ZPCI_PCI_LS_ERR
;
1091 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_DMAAS_INVAL
);
1092 } else if (!pbdev
->iommu
->enabled
) {
1093 cc
= ZPCI_PCI_LS_ERR
;
1094 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_SEQUENCE
);
1096 pci_dereg_ioat(pbdev
->iommu
);
1097 if (reg_ioat(env
, pbdev
->iommu
, fib
, ra
)) {
1098 cc
= ZPCI_PCI_LS_ERR
;
1099 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_INSUF_RES
);
1103 case ZPCI_MOD_FC_RESET_ERROR
:
1104 switch (pbdev
->state
) {
1105 case ZPCI_FS_BLOCKED
:
1107 pbdev
->state
= ZPCI_FS_ENABLED
;
1110 cc
= ZPCI_PCI_LS_ERR
;
1111 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_SEQUENCE
);
1114 case ZPCI_MOD_FC_RESET_BLOCK
:
1115 switch (pbdev
->state
) {
1117 pbdev
->state
= ZPCI_FS_BLOCKED
;
1120 cc
= ZPCI_PCI_LS_ERR
;
1121 s390_set_status_code(env
, r1
, ZPCI_MOD_ST_SEQUENCE
);
1124 case ZPCI_MOD_FC_SET_MEASURE
: {
1125 uint64_t fmb_addr
= ldq_p(&fib
.fmb_addr
);
1127 if (fmb_addr
& FMBK_MASK
) {
1128 cc
= ZPCI_PCI_LS_ERR
;
1129 s390_pci_generate_error_event(ERR_EVENT_FMBPRO
, pbdev
->fh
,
1130 pbdev
->fid
, fmb_addr
, 0);
1131 fmb_timer_free(pbdev
);
1136 /* Stop updating FMB. */
1137 fmb_timer_free(pbdev
);
1141 if (!pbdev
->fmb_timer
) {
1142 pbdev
->fmb_timer
= timer_new_ms(QEMU_CLOCK_VIRTUAL
,
1144 } else if (timer_pending(pbdev
->fmb_timer
)) {
1145 /* Remove pending timer to update FMB address. */
1146 timer_del(pbdev
->fmb_timer
);
1148 pbdev
->fmb_addr
= fmb_addr
;
1149 timer_mod(pbdev
->fmb_timer
,
1150 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL
) + DEFAULT_MUI
);
1154 s390_program_interrupt(&cpu
->env
, PGM_OPERAND
, 6, ra
);
1155 cc
= ZPCI_PCI_LS_ERR
;
1162 int stpcifc_service_call(S390CPU
*cpu
, uint8_t r1
, uint64_t fiba
, uint8_t ar
,
1165 CPUS390XState
*env
= &cpu
->env
;
1169 S390PCIBusDevice
*pbdev
;
1171 uint64_t cc
= ZPCI_PCI_LS_OK
;
1173 if (env
->psw
.mask
& PSW_MASK_PSTATE
) {
1174 s390_program_interrupt(env
, PGM_PRIVILEGED
, 6, ra
);
1178 fh
= env
->regs
[r1
] >> 32;
1179 dmaas
= (env
->regs
[r1
] >> 16) & 0xff;
1182 setcc(cpu
, ZPCI_PCI_LS_ERR
);
1183 s390_set_status_code(env
, r1
, ZPCI_STPCIFC_ST_INVAL_DMAAS
);
1188 s390_program_interrupt(env
, PGM_SPECIFICATION
, 6, ra
);
1192 pbdev
= s390_pci_find_dev_by_idx(s390_get_phb(), fh
& FH_MASK_INDEX
);
1194 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
1198 memset(&fib
, 0, sizeof(fib
));
1200 switch (pbdev
->state
) {
1201 case ZPCI_FS_RESERVED
:
1202 case ZPCI_FS_STANDBY
:
1203 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
1205 case ZPCI_FS_DISABLED
:
1206 if (fh
& FH_MASK_ENABLE
) {
1207 setcc(cpu
, ZPCI_PCI_LS_INVAL_HANDLE
);
1211 /* BLOCKED bit is set to one coincident with the setting of ERROR bit.
1212 * FH Enabled bit is set to one in states of ENABLED, BLOCKED or ERROR. */
1216 case ZPCI_FS_BLOCKED
:
1219 case ZPCI_FS_ENABLED
:
1221 if (pbdev
->iommu
->enabled
) {
1224 if (!(fh
& FH_MASK_ENABLE
)) {
1225 env
->regs
[r1
] |= 1ULL << 63;
1228 case ZPCI_FS_PERMANENT_ERROR
:
1229 setcc(cpu
, ZPCI_PCI_LS_ERR
);
1230 s390_set_status_code(env
, r1
, ZPCI_STPCIFC_ST_PERM_ERROR
);
1234 stq_p(&fib
.pba
, pbdev
->iommu
->pba
);
1235 stq_p(&fib
.pal
, pbdev
->iommu
->pal
);
1236 stq_p(&fib
.iota
, pbdev
->iommu
->g_iota
);
1237 stq_p(&fib
.aibv
, pbdev
->routes
.adapter
.ind_addr
);
1238 stq_p(&fib
.aisb
, pbdev
->routes
.adapter
.summary_addr
);
1239 stq_p(&fib
.fmb_addr
, pbdev
->fmb_addr
);
1241 data
= ((uint32_t)pbdev
->isc
<< 28) | ((uint32_t)pbdev
->noi
<< 16) |
1242 ((uint32_t)pbdev
->routes
.adapter
.ind_offset
<< 8) |
1243 ((uint32_t)pbdev
->sum
<< 7) | pbdev
->routes
.adapter
.summary_offset
;
1244 stl_p(&fib
.data
, data
);
1247 if (s390_cpu_virt_mem_write(cpu
, fiba
, ar
, (uint8_t *)&fib
, sizeof(fib
))) {
1248 s390_cpu_virt_mem_handle_exc(cpu
, ra
);