pci/aer: fix error injection
[qemu.git] / hw / pcie_aer.c
blob 9505c933431ed7b1979f556a5dfdf31ebae91349
/*
 * pcie_aer.c
 *
 * Copyright (c) 2010 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "sysemu.h"
#include "pci_bridge.h"
#include "pcie.h"
#include "msix.h"
#include "msi.h"
#include "pci_internals.h"
#include "pcie_regs.h"
//#define DEBUG_PCIE
#ifdef DEBUG_PCIE
# define PCIE_DPRINTF(fmt, ...)                                         \
    fprintf(stderr, "%s:%d " fmt, __func__, __LINE__, ## __VA_ARGS__)
#else
# define PCIE_DPRINTF(fmt, ...) do {} while (0)
#endif
#define PCIE_DEV_PRINTF(dev, fmt, ...)                                  \
    PCIE_DPRINTF("%s:%x "fmt, (dev)->name, (dev)->devfn, ## __VA_ARGS__)
/* From 6.2.7 Error Listing and Rules. Table 6-2, 6-3 and 6-4 */
static uint32_t pcie_aer_uncor_default_severity(uint32_t status)
{
    switch (status) {
    case PCI_ERR_UNC_INTN:
    case PCI_ERR_UNC_DLP:
    case PCI_ERR_UNC_SDN:
    case PCI_ERR_UNC_RX_OVER:
    case PCI_ERR_UNC_FCP:
    case PCI_ERR_UNC_MALF_TLP:
        return PCI_ERR_ROOT_CMD_FATAL_EN;
    case PCI_ERR_UNC_POISON_TLP:
    case PCI_ERR_UNC_ECRC:
    case PCI_ERR_UNC_UNSUP:
    case PCI_ERR_UNC_COMP_TIME:
    case PCI_ERR_UNC_COMP_ABORT:
    case PCI_ERR_UNC_UNX_COMP:
    case PCI_ERR_UNC_ACSV:
    case PCI_ERR_UNC_MCBTLP:
    case PCI_ERR_UNC_ATOP_EBLOCKED:
    case PCI_ERR_UNC_TLP_PRF_BLOCKED:
        return PCI_ERR_ROOT_CMD_NONFATAL_EN;
    default:
        abort();
        break;
    }
    return PCI_ERR_ROOT_CMD_FATAL_EN;
}
static int aer_log_add_err(PCIEAERLog *aer_log, const PCIEAERErr *err)
{
    if (aer_log->log_num == aer_log->log_max) {
        return -1;
    }
    memcpy(&aer_log->log[aer_log->log_num], err, sizeof *err);
    aer_log->log_num++;
    return 0;
}
static void aer_log_del_err(PCIEAERLog *aer_log, PCIEAERErr *err)
{
    assert(aer_log->log_num);
    *err = aer_log->log[0];
    aer_log->log_num--;
    memmove(&aer_log->log[0], &aer_log->log[1],
            aer_log->log_num * sizeof *err);
}
static void aer_log_clear_all_err(PCIEAERLog *aer_log)
{
    aer_log->log_num = 0;
}
int pcie_aer_init(PCIDevice *dev, uint16_t offset)
{
    PCIExpressDevice *exp;

    pcie_add_capability(dev, PCI_EXT_CAP_ID_ERR, PCI_ERR_VER,
                        offset, PCI_ERR_SIZEOF);
    exp = &dev->exp;
    exp->aer_cap = offset;

    /* log_max is property */
    if (dev->exp.aer_log.log_max == PCIE_AER_LOG_MAX_UNSET) {
        dev->exp.aer_log.log_max = PCIE_AER_LOG_MAX_DEFAULT;
    }
    /* clip down the value to avoid unreasonable memory usage */
    if (dev->exp.aer_log.log_max > PCIE_AER_LOG_MAX_LIMIT) {
        return -EINVAL;
    }
    dev->exp.aer_log.log = qemu_mallocz(sizeof dev->exp.aer_log.log[0] *
                                        dev->exp.aer_log.log_max);

    pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS,
                 PCI_ERR_UNC_SUPPORTED);

    pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER,
                 PCI_ERR_UNC_SEVERITY_DEFAULT);
    pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_SEVER,
                 PCI_ERR_UNC_SUPPORTED);

    pci_long_test_and_set_mask(dev->w1cmask + offset + PCI_ERR_COR_STATUS,
                               PCI_ERR_COR_STATUS);

    pci_set_long(dev->config + offset + PCI_ERR_COR_MASK,
                 PCI_ERR_COR_MASK_DEFAULT);
    pci_set_long(dev->wmask + offset + PCI_ERR_COR_MASK,
                 PCI_ERR_COR_SUPPORTED);

    /* capabilities and control. multiple header logging is supported */
    if (dev->exp.aer_log.log_max > 0) {
        pci_set_long(dev->config + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENC | PCI_ERR_CAP_ECRC_CHKC |
                     PCI_ERR_CAP_MHRC);
        pci_set_long(dev->wmask + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE |
                     PCI_ERR_CAP_MHRE);
    } else {
        pci_set_long(dev->config + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENC | PCI_ERR_CAP_ECRC_CHKC);
        pci_set_long(dev->wmask + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
    }

    switch (pcie_cap_get_type(dev)) {
    case PCI_EXP_TYPE_ROOT_PORT:
        /* this case will be set by pcie_aer_root_init() */
        /* fallthrough */
    case PCI_EXP_TYPE_DOWNSTREAM:
    case PCI_EXP_TYPE_UPSTREAM:
        pci_word_test_and_set_mask(dev->wmask + PCI_BRIDGE_CONTROL,
                                   PCI_BRIDGE_CTL_SERR);
        pci_long_test_and_set_mask(dev->w1cmask + PCI_STATUS,
                                   PCI_SEC_STATUS_RCV_SYSTEM_ERROR);
        break;
    default:
        /* nothing */
        break;
    }

    return 0;
}
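
/*
 * Illustrative sketch only, not part of this file: a PCIe endpoint model
 * would typically call pcie_aer_init() from its init function, after the
 * express capability is set up.  The device name, capability offsets and
 * return handling below are assumptions, not code from qemu.git:
 *
 *     static int foo_pcie_initfn(PCIDevice *d)
 *     {
 *         int ret;
 *
 *         ret = pcie_cap_init(d, 0x70, PCI_EXP_TYPE_ENDPOINT, 0);
 *         if (ret < 0) {
 *             return ret;
 *         }
 *         // AER lives in extended config space, e.g. at offset 0x100
 *         return pcie_aer_init(d, 0x100);
 *     }
 */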
void pcie_aer_exit(PCIDevice *dev)
{
    qemu_free(dev->exp.aer_log.log);
}
static void pcie_aer_update_uncor_status(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    PCIEAERLog *aer_log = &dev->exp.aer_log;

    uint16_t i;
    for (i = 0; i < aer_log->log_num; i++) {
        pci_long_test_and_set_mask(aer_cap + PCI_ERR_UNCOR_STATUS,
                                   dev->exp.aer_log.log[i].status);
    }
}
/*
 * return value:
 * true: error message needs to be sent up
 * false: error message is masked
 *
 * 6.2.6 Error Message Control
 * Figure 6-3
 * all pci express devices part
 */
static bool
pcie_aer_msg_alldev(PCIDevice *dev, const PCIEAERMsg *msg)
{
    if (!(pcie_aer_msg_is_uncor(msg) &&
          (pci_get_word(dev->config + PCI_COMMAND) & PCI_COMMAND_SERR))) {
        return false;
    }

    /* Signaled System Error
     *
     * 7.5.1.1 Command register
     * Bit 8 SERR# Enable
     *
     * When Set, this bit enables reporting of Non-fatal and Fatal
     * errors detected by the Function to the Root Complex. Note that
     * errors are reported if enabled either through this bit or through
     * the PCI Express specific bits in the Device Control register (see
     * Section 7.8.4).
     */
    pci_word_test_and_set_mask(dev->config + PCI_STATUS,
                               PCI_STATUS_SIG_SYSTEM_ERROR);

    if (!(msg->severity &
          pci_get_word(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL))) {
        return false;
    }

    /* send up error message */
    return true;
}
/*
 * return value:
 * true: error message is sent up
 * false: error message is masked
 *
 * 6.2.6 Error Message Control
 * Figure 6-3
 * virtual pci bridge part
 */
static bool pcie_aer_msg_vbridge(PCIDevice *dev, const PCIEAERMsg *msg)
{
    uint16_t bridge_control = pci_get_word(dev->config + PCI_BRIDGE_CONTROL);

    if (pcie_aer_msg_is_uncor(msg)) {
        /* Received System Error */
        pci_word_test_and_set_mask(dev->config + PCI_SEC_STATUS,
                                   PCI_SEC_STATUS_RCV_SYSTEM_ERROR);
    }

    if (!(bridge_control & PCI_BRIDGE_CTL_SERR)) {
        return false;
    }
    return true;
}
void pcie_aer_root_set_vector(PCIDevice *dev, unsigned int vector)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    assert(vector < PCI_ERR_ROOT_IRQ_MAX);
    pci_long_test_and_clear_mask(aer_cap + PCI_ERR_ROOT_STATUS,
                                 PCI_ERR_ROOT_IRQ);
    pci_long_test_and_set_mask(aer_cap + PCI_ERR_ROOT_STATUS,
                               vector << PCI_ERR_ROOT_IRQ_SHIFT);
}
static unsigned int pcie_aer_root_get_vector(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
    return (root_status & PCI_ERR_ROOT_IRQ) >> PCI_ERR_ROOT_IRQ_SHIFT;
}
/* Given a status register, get corresponding bits in the command register */
static uint32_t pcie_aer_status_to_cmd(uint32_t status)
{
    uint32_t cmd = 0;
    if (status & PCI_ERR_ROOT_COR_RCV) {
        cmd |= PCI_ERR_ROOT_CMD_COR_EN;
    }
    if (status & PCI_ERR_ROOT_NONFATAL_RCV) {
        cmd |= PCI_ERR_ROOT_CMD_NONFATAL_EN;
    }
    if (status & PCI_ERR_ROOT_FATAL_RCV) {
        cmd |= PCI_ERR_ROOT_CMD_FATAL_EN;
    }
    return cmd;
}
/*
 * return value:
 * true: error message is sent up
 * false: error message is masked
 *
 * 6.2.6 Error Message Control
 * Figure 6-3
 * root port part
 */
static bool pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
{
    bool msg_sent;
    uint16_t cmd;
    uint8_t *aer_cap;
    uint32_t root_cmd;
    uint32_t root_status, prev_status;
    bool msi_trigger;

    msg_sent = false;
    cmd = pci_get_word(dev->config + PCI_COMMAND);
    aer_cap = dev->config + dev->exp.aer_cap;
    root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
    prev_status = root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
    msi_trigger = false;

    if (cmd & PCI_COMMAND_SERR) {
        /* System Error.
         *
         * The way to report System Error is platform specific and
         * it isn't implemented in qemu right now.
         * So just discard the error for now.
         * An OS which cares about AER would receive errors via the
         * native AER mechanisms, so this wouldn't matter.
         */
    }

    /* Error Message Received: Root Error Status register */
    switch (msg->severity) {
    case PCI_ERR_ROOT_CMD_COR_EN:
        if (root_status & PCI_ERR_ROOT_COR_RCV) {
            root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
        } else {
            if (root_cmd & PCI_ERR_ROOT_CMD_COR_EN) {
                msi_trigger = true;
            }
            pci_set_word(aer_cap + PCI_ERR_ROOT_COR_SRC, msg->source_id);
        }
        root_status |= PCI_ERR_ROOT_COR_RCV;
        break;
    case PCI_ERR_ROOT_CMD_NONFATAL_EN:
        if (!(root_status & PCI_ERR_ROOT_NONFATAL_RCV) &&
            root_cmd & PCI_ERR_ROOT_CMD_NONFATAL_EN) {
            msi_trigger = true;
        }
        root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
        break;
    case PCI_ERR_ROOT_CMD_FATAL_EN:
        if (!(root_status & PCI_ERR_ROOT_FATAL_RCV) &&
            root_cmd & PCI_ERR_ROOT_CMD_FATAL_EN) {
            msi_trigger = true;
        }
        if (!(root_status & PCI_ERR_ROOT_UNCOR_RCV)) {
            root_status |= PCI_ERR_ROOT_FIRST_FATAL;
        }
        root_status |= PCI_ERR_ROOT_FATAL_RCV;
        break;
    default:
        abort();
        break;
    }
    if (pcie_aer_msg_is_uncor(msg)) {
        if (root_status & PCI_ERR_ROOT_UNCOR_RCV) {
            root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
        } else {
            pci_set_word(aer_cap + PCI_ERR_ROOT_SRC, msg->source_id);
        }
        root_status |= PCI_ERR_ROOT_UNCOR_RCV;
    }
    pci_set_long(aer_cap + PCI_ERR_ROOT_STATUS, root_status);

    /* 6.2.4.1.2 Interrupt Generation */
    /* All the above did was set some bits in the status register,
     * specifically those that match the message severity.
     * The code below relies on this fact. */
    if (!(root_cmd & msg->severity) ||
        (pcie_aer_status_to_cmd(prev_status) & root_cmd)) {
        /* Condition is not being set or was already true so nothing to do. */
        return msg_sent;
    }

    msg_sent = true;
    if (msix_enabled(dev)) {
        msix_notify(dev, pcie_aer_root_get_vector(dev));
    } else if (msi_enabled(dev)) {
        msi_notify(dev, pcie_aer_root_get_vector(dev));
    } else {
        qemu_set_irq(dev->irq[dev->exp.aer_intx], 1);
    }
    return msg_sent;
}
/*
 * 6.2.6 Error Message Control Figure 6-3
 *
 * Walk up the bus tree from the device, propagate the error message.
 */
static void pcie_aer_msg(PCIDevice *dev, const PCIEAERMsg *msg)
{
    uint8_t type;

    while (dev) {
        if (!pci_is_express(dev)) {
            /* just ignore it */
            /* TODO: Shouldn't we set PCI_STATUS_SIG_SYSTEM_ERROR?
             * Consider e.g. a PCI bridge above a PCI Express device. */
            return;
        }

        type = pcie_cap_get_type(dev);
        if ((type == PCI_EXP_TYPE_ROOT_PORT ||
            type == PCI_EXP_TYPE_UPSTREAM ||
            type == PCI_EXP_TYPE_DOWNSTREAM) &&
            !pcie_aer_msg_vbridge(dev, msg)) {
            return;
        }
        if (!pcie_aer_msg_alldev(dev, msg)) {
            return;
        }
        if (type == PCI_EXP_TYPE_ROOT_PORT) {
            pcie_aer_msg_root_port(dev, msg);
            /* Root port can notify system itself,
               or send the error message to root complex event collector. */
            /*
             * if root port is associated with an event collector,
             * return the root complex event collector here.
             * For now root complex event collector isn't supported.
             */
            return;
        }
        dev = pci_bridge_get_device(dev->bus);
    }
}
static void pcie_aer_update_log(PCIDevice *dev, const PCIEAERErr *err)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint8_t first_bit = ffs(err->status) - 1;
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    int i;

    assert(err->status);
    /* one and only one bit may be set */
    assert(!(err->status & (err->status - 1)));

    errcap &= ~(PCI_ERR_CAP_FEP_MASK | PCI_ERR_CAP_TLP);
    errcap |= PCI_ERR_CAP_FEP(first_bit);

    if (err->flags & PCIE_AER_ERR_HEADER_VALID) {
        for (i = 0; i < ARRAY_SIZE(err->header); ++i) {
            /* 7.10.8 Header Log Register */
            uint8_t *header_log =
                aer_cap + PCI_ERR_HEADER_LOG + i * sizeof err->header[0];
            cpu_to_be32wu((uint32_t*)header_log, err->header[i]);
        }
    } else {
        assert(!(err->flags & PCIE_AER_ERR_TLP_PREFIX_PRESENT));
        memset(aer_cap + PCI_ERR_HEADER_LOG, 0, PCI_ERR_HEADER_LOG_SIZE);
    }

    if ((err->flags & PCIE_AER_ERR_TLP_PREFIX_PRESENT) &&
        (pci_get_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL2) &
         PCI_EXP_DEVCAP2_EETLPP)) {
        for (i = 0; i < ARRAY_SIZE(err->prefix); ++i) {
            /* 7.10.12 tlp prefix log register */
            uint8_t *prefix_log =
                aer_cap + PCI_ERR_TLP_PREFIX_LOG + i * sizeof err->prefix[0];
            cpu_to_be32wu((uint32_t*)prefix_log, err->prefix[i]);
        }
        errcap |= PCI_ERR_CAP_TLP;
    } else {
        memset(aer_cap + PCI_ERR_TLP_PREFIX_LOG, 0,
               PCI_ERR_TLP_PREFIX_LOG_SIZE);
    }
    pci_set_long(aer_cap + PCI_ERR_CAP, errcap);
}
static void pcie_aer_clear_log(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;

    pci_long_test_and_clear_mask(aer_cap + PCI_ERR_CAP,
                                 PCI_ERR_CAP_FEP_MASK | PCI_ERR_CAP_TLP);

    memset(aer_cap + PCI_ERR_HEADER_LOG, 0, PCI_ERR_HEADER_LOG_SIZE);
    memset(aer_cap + PCI_ERR_TLP_PREFIX_LOG, 0, PCI_ERR_TLP_PREFIX_LOG_SIZE);
}
static void pcie_aer_clear_error(PCIDevice *dev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    PCIEAERLog *aer_log = &dev->exp.aer_log;
    PCIEAERErr err;

    if (!(errcap & PCI_ERR_CAP_MHRE) || !aer_log->log_num) {
        pcie_aer_clear_log(dev);
        return;
    }

    /*
     * If more errors are queued, set corresponding bits in uncorrectable
     * error status.
     * We emulate uncorrectable error status register as W1CS.
     * So set bit in uncorrectable error status here again for multiple
     * error recording support.
     *
     * 6.2.4.2 Multiple Error Handling (Advanced Error Reporting Capability)
     */
    pcie_aer_update_uncor_status(dev);

    aer_log_del_err(aer_log, &err);
    pcie_aer_update_log(dev, &err);
}
static int pcie_aer_record_error(PCIDevice *dev,
                                 const PCIEAERErr *err)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    int fep = PCI_ERR_CAP_FEP(errcap);

    assert(err->status);
    /* one and only one bit may be set */
    assert(!(err->status & (err->status - 1)));

    if (errcap & PCI_ERR_CAP_MHRE &&
        (pci_get_long(aer_cap + PCI_ERR_UNCOR_STATUS) & (1U << fep))) {
        /* Not first error. queue error */
        if (aer_log_add_err(&dev->exp.aer_log, err) < 0) {
            /* overflow */
            return -1;
        }
        return 0;
    }

    pcie_aer_update_log(dev, err);
    return 0;
}
typedef struct PCIEAERInject {
    PCIDevice *dev;
    uint8_t *aer_cap;
    const PCIEAERErr *err;
    uint16_t devctl;
    uint16_t devsta;
    uint32_t error_status;
    bool unsupported_request;
    bool log_overflow;
    PCIEAERMsg msg;
} PCIEAERInject;
static bool pcie_aer_inject_cor_error(PCIEAERInject *inj,
                                      uint32_t uncor_status,
                                      bool is_advisory_nonfatal)
{
    PCIDevice *dev = inj->dev;

    inj->devsta |= PCI_EXP_DEVSTA_CED;
    if (inj->unsupported_request) {
        inj->devsta |= PCI_EXP_DEVSTA_URD;
    }
    pci_set_word(dev->config + dev->exp.exp_cap + PCI_EXP_DEVSTA, inj->devsta);

    if (inj->aer_cap) {
        uint32_t mask;
        pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_COR_STATUS,
                                   inj->error_status);
        mask = pci_get_long(inj->aer_cap + PCI_ERR_COR_MASK);
        if (mask & inj->error_status) {
            return false;
        }
        if (is_advisory_nonfatal) {
            uint32_t uncor_mask =
                pci_get_long(inj->aer_cap + PCI_ERR_UNCOR_MASK);
            if (!(uncor_mask & uncor_status)) {
                inj->log_overflow = !!pcie_aer_record_error(dev, inj->err);
            }
            pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS,
                                       uncor_status);
        }
    }

    if (inj->unsupported_request && !(inj->devctl & PCI_EXP_DEVCTL_URRE)) {
        return false;
    }
    if (!(inj->devctl & PCI_EXP_DEVCTL_CERE)) {
        return false;
    }

    inj->msg.severity = PCI_ERR_ROOT_CMD_COR_EN;
    return true;
}
static bool pcie_aer_inject_uncor_error(PCIEAERInject *inj, bool is_fatal)
{
    PCIDevice *dev = inj->dev;
    uint16_t cmd;

    if (is_fatal) {
        inj->devsta |= PCI_EXP_DEVSTA_FED;
    } else {
        inj->devsta |= PCI_EXP_DEVSTA_NFED;
    }
    if (inj->unsupported_request) {
        inj->devsta |= PCI_EXP_DEVSTA_URD;
    }
    pci_set_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVSTA, inj->devsta);

    if (inj->aer_cap) {
        uint32_t mask = pci_get_long(inj->aer_cap + PCI_ERR_UNCOR_MASK);
        if (mask & inj->error_status) {
            pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS,
                                       inj->error_status);
            return false;
        }

        inj->log_overflow = !!pcie_aer_record_error(dev, inj->err);
        pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS,
                                   inj->error_status);
    }

    cmd = pci_get_word(dev->config + PCI_COMMAND);
    if (inj->unsupported_request &&
        !(inj->devctl & PCI_EXP_DEVCTL_URRE) && !(cmd & PCI_COMMAND_SERR)) {
        return false;
    }
    if (is_fatal) {
        if (!((cmd & PCI_COMMAND_SERR) ||
              (inj->devctl & PCI_EXP_DEVCTL_FERE))) {
            return false;
        }
        inj->msg.severity = PCI_ERR_ROOT_CMD_FATAL_EN;
    } else {
        if (!((cmd & PCI_COMMAND_SERR) ||
              (inj->devctl & PCI_EXP_DEVCTL_NFERE))) {
            return false;
        }
        inj->msg.severity = PCI_ERR_ROOT_CMD_NONFATAL_EN;
    }
    return true;
}
/*
 * non-Function specific error must be recorded in all functions.
 * It is the responsibility of the caller of this function.
 * It is also the caller's responsibility to determine which function should
 * report the error.
 *
 * 6.2.4 Error Logging
 * 6.2.5 Sequence of Device Error Signaling and Logging Operations
 * table 6-2: Flowchart Showing Sequence of Device Error Signaling and Logging
 *            Operations
 */
int pcie_aer_inject_error(PCIDevice *dev, const PCIEAERErr *err)
{
    uint8_t *aer_cap = NULL;
    uint16_t devctl = 0;
    uint16_t devsta = 0;
    uint32_t error_status = err->status;
    PCIEAERInject inj;

    if (!pci_is_express(dev)) {
        return -ENOSYS;
    }

    if (err->flags & PCIE_AER_ERR_IS_CORRECTABLE) {
        error_status &= PCI_ERR_COR_SUPPORTED;
    } else {
        error_status &= PCI_ERR_UNC_SUPPORTED;
    }

    /* invalid status bit. one and only one bit must be set */
    if (!error_status || (error_status & (error_status - 1))) {
        return -EINVAL;
    }

    if (dev->exp.aer_cap) {
        uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
        aer_cap = dev->config + dev->exp.aer_cap;
        devctl = pci_get_long(exp_cap + PCI_EXP_DEVCTL);
        devsta = pci_get_long(exp_cap + PCI_EXP_DEVSTA);
    }

    inj.dev = dev;
    inj.aer_cap = aer_cap;
    inj.err = err;
    inj.devctl = devctl;
    inj.devsta = devsta;
    inj.error_status = error_status;
    inj.unsupported_request = !(err->flags & PCIE_AER_ERR_IS_CORRECTABLE) &&
        err->status == PCI_ERR_UNC_UNSUP;
    inj.log_overflow = false;

    if (err->flags & PCIE_AER_ERR_IS_CORRECTABLE) {
        if (!pcie_aer_inject_cor_error(&inj, 0, false)) {
            return 0;
        }
    } else {
        bool is_fatal =
            pcie_aer_uncor_default_severity(error_status) ==
            PCI_ERR_ROOT_CMD_FATAL_EN;
        if (aer_cap) {
            is_fatal =
                error_status & pci_get_long(aer_cap + PCI_ERR_UNCOR_SEVER);
        }
        if (!is_fatal && (err->flags & PCIE_AER_ERR_MAYBE_ADVISORY)) {
            inj.error_status = PCI_ERR_COR_ADV_NONFATAL;
            if (!pcie_aer_inject_cor_error(&inj, error_status, true)) {
                return 0;
            }
        } else {
            if (!pcie_aer_inject_uncor_error(&inj, is_fatal)) {
                return 0;
            }
        }
    }

    /* send up error message */
    inj.msg.source_id = err->source_id;
    pcie_aer_msg(dev, &inj.msg);

    if (inj.log_overflow) {
        PCIEAERErr header_log_overflow = {
            .status = PCI_ERR_COR_HL_OVERFLOW,
            .flags = PCIE_AER_ERR_IS_CORRECTABLE,
        };
        int ret = pcie_aer_inject_error(dev, &header_log_overflow);
        assert(!ret);
    }
    return 0;
}
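
/*
 * Illustrative sketch only, not part of this file: a caller, e.g. a
 * monitor error-injection command, would fill in a PCIEAERErr and hand it
 * to pcie_aer_inject_error().  The concrete status bit and the source_id
 * encoding below are assumptions for the example:
 *
 *     PCIEAERErr err = {
 *         .status    = PCI_ERR_UNC_COMP_TIME,   // one and only one bit set
 *         .source_id = (pci_bus_num(dev->bus) << 8) | dev->devfn,
 *         .flags     = 0,                       // uncorrectable, no header
 *     };
 *     int ret = pcie_aer_inject_error(dev, &err);
 *     // ret < 0: injection refused (-ENOSYS for a non-express device,
 *     //          -EINVAL for an invalid status mask)
 *     // ret == 0: error recorded and, unless masked, a message sent up
 */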
void pcie_aer_write_config(PCIDevice *dev,
                           uint32_t addr, uint32_t val, int len)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    uint32_t first_error = 1U << PCI_ERR_CAP_FEP(errcap);
    uint32_t uncorsta = pci_get_long(aer_cap + PCI_ERR_UNCOR_STATUS);

    /* uncorrectable error */
    if (!(uncorsta & first_error)) {
        /* the bit that corresponds to the first error is cleared */
        pcie_aer_clear_error(dev);
    } else if (errcap & PCI_ERR_CAP_MHRE) {
        /* When PCI_ERR_CAP_MHRE is enabled and the first error isn't cleared
         * nothing should happen. So we have to revert the modification to
         * the register.
         */
        pcie_aer_update_uncor_status(dev);
    } else {
        /* capability & control
         * PCI_ERR_CAP_MHRE might be cleared, so clear the header log.
         */
        aer_log_clear_all_err(&dev->exp.aer_log);
    }
}
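
/*
 * Illustrative sketch only, not part of this file: an express device model
 * is expected to chain its config space write handler into the helper
 * above.  The wrapper name is an assumption:
 *
 *     static void foo_write_config(PCIDevice *d, uint32_t addr,
 *                                  uint32_t val, int len)
 *     {
 *         pci_default_write_config(d, addr, val, len);
 *         pcie_aer_write_config(d, addr, val, len);
 *     }
 */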
void pcie_aer_root_init(PCIDevice *dev)
{
    uint16_t pos = dev->exp.aer_cap;

    pci_set_long(dev->wmask + pos + PCI_ERR_ROOT_COMMAND,
                 PCI_ERR_ROOT_CMD_EN_MASK);
    pci_set_long(dev->w1cmask + pos + PCI_ERR_ROOT_STATUS,
                 PCI_ERR_ROOT_STATUS_REPORT_MASK);
}
void pcie_aer_root_reset(PCIDevice *dev)
{
    uint8_t* aer_cap = dev->config + dev->exp.aer_cap;

    pci_set_long(aer_cap + PCI_ERR_ROOT_COMMAND, 0);

    /*
     * Advanced Error Interrupt Message Number in Root Error Status Register
     * must be updated by chip dependent code because it's chip dependent
     * which number is used.
     */
}
static bool pcie_aer_root_does_trigger(uint32_t cmd, uint32_t status)
{
    return
        ((cmd & PCI_ERR_ROOT_CMD_COR_EN) && (status & PCI_ERR_ROOT_COR_RCV)) ||
        ((cmd & PCI_ERR_ROOT_CMD_NONFATAL_EN) &&
         (status & PCI_ERR_ROOT_NONFATAL_RCV)) ||
        ((cmd & PCI_ERR_ROOT_CMD_FATAL_EN) &&
         (status & PCI_ERR_ROOT_FATAL_RCV));
}
void pcie_aer_root_write_config(PCIDevice *dev,
                                uint32_t addr, uint32_t val, int len,
                                uint32_t root_cmd_prev)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;

    /* root command register */
    uint32_t root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
    if (root_cmd & PCI_ERR_ROOT_CMD_EN_MASK) {
        /* 6.2.4.1.2 Interrupt Generation */

        /* 0 -> 1 */
        uint32_t root_cmd_set = ~root_cmd_prev & root_cmd;
        uint32_t root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);
        bool trigger = pcie_aer_root_does_trigger(root_cmd_set, root_status);

        if (msix_enabled(dev)) {
            if (trigger) {
                msix_notify(dev, pcie_aer_root_get_vector(dev));
            }
        } else if (msi_enabled(dev)) {
            if (trigger) {
                msi_notify(dev, pcie_aer_root_get_vector(dev));
            }
        } else {
            qemu_set_irq(dev->irq[dev->exp.aer_intx], trigger);
        }
    }
}
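
/*
 * Illustrative sketch only, not part of this file: a root port model needs
 * to snapshot the Root Error Command register before applying the write and
 * pass the old value in as root_cmd_prev, much as the ioh3420 root port
 * model does.  The wrapper name below is an assumption:
 *
 *     static void rp_write_config(PCIDevice *d, uint32_t addr,
 *                                 uint32_t val, int len)
 *     {
 *         uint32_t root_cmd =
 *             pci_get_long(d->config + d->exp.aer_cap + PCI_ERR_ROOT_COMMAND);
 *         pci_bridge_write_config(d, addr, val, len);
 *         pcie_aer_root_write_config(d, addr, val, len, root_cmd);
 *     }
 */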
static const VMStateDescription vmstate_pcie_aer_err = {
    .name = "PCIE_AER_ERROR",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(status, PCIEAERErr),
        VMSTATE_UINT16(source_id, PCIEAERErr),
        VMSTATE_UINT16(flags, PCIEAERErr),
        VMSTATE_UINT32_ARRAY(header, PCIEAERErr, 4),
        VMSTATE_UINT32_ARRAY(prefix, PCIEAERErr, 4),
        VMSTATE_END_OF_LIST()
    }
};
#define VMSTATE_PCIE_AER_ERRS(_field, _state, _field_num, _vmsd, _type) { \
    .name       = (stringify(_field)),                                   \
    .version_id = 0,                                                     \
    .num_offset = vmstate_offset_value(_state, _field_num, uint16_t),    \
    .size       = sizeof(_type),                                         \
    .vmsd       = &(_vmsd),                                              \
    .flags      = VMS_POINTER | VMS_VARRAY_UINT16 | VMS_STRUCT,          \
    .offset     = vmstate_offset_pointer(_state, _field, _type),         \
}
const VMStateDescription vmstate_pcie_aer_log = {
    .name = "PCIE_AER_ERROR_LOG",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(log_num, PCIEAERLog),
        VMSTATE_UINT16(log_max, PCIEAERLog),
        VMSTATE_PCIE_AER_ERRS(log, PCIEAERLog, log_num,
                              vmstate_pcie_aer_err, PCIEAERErr),
        VMSTATE_END_OF_LIST()