/*
 * edac_mc kernel module
 * (C) 2005, 2006 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Thayne Harbaugh
 * Based on work by Dan Hollis <goemon at anime dot net> and others.
 *	http://www.anime.net/~goemon/linux-ecc/
 *
 * Modified by Dave Peterson and Doug Thompson
 */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/sysdev.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/edac.h>
#include "edac_core.h"
#include "edac_module.h"
/* lock to memory controller's control array */
static DEFINE_MUTEX(mem_ctls_mutex);
static LIST_HEAD(mc_devices);
#ifdef CONFIG_EDAC_DEBUG

static void edac_mc_dump_channel(struct channel_info *chan)
{
	debugf4("\tchannel = %p\n", chan);
	debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
	debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
	debugf4("\tchannel->label = '%s'\n", chan->label);
	debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
}
static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
	debugf4("\tcsrow = %p\n", csrow);
	debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
	debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
	debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
	debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
	debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
	debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
	debugf4("\tcsrow->channels = %p\n", csrow->channels);
	debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
}
static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
	debugf3("\tmci = %p\n", mci);
	debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
	debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
	debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap);
	debugf4("\tmci->edac_check = %p\n", mci->edac_check);
	debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
		mci->nr_csrows, mci->csrows);
	debugf3("\tdev = %p\n", mci->dev);
	debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
	debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
}

#endif				/* CONFIG_EDAC_DEBUG */
/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
 * Adjust 'ptr' so that its alignment is at least as stringent as what the
 * compiler would provide for X and return the aligned result.
 *
 * If 'size' is a constant, the compiler will optimize this whole function
 * down to either a no-op or the addition of a constant to the value of 'ptr'.
 */
void *edac_align_ptr(void *ptr, unsigned size)
{
	unsigned align, r;

	/* Here we assume that the alignment of a "long long" is the most
	 * stringent alignment that the compiler will ever provide by default.
	 * As far as I know, this is a reasonable assumption.
	 */
	if (size > sizeof(long))
		align = sizeof(long long);
	else if (size > sizeof(int))
		align = sizeof(long);
	else if (size > sizeof(short))
		align = sizeof(int);
	else if (size > sizeof(char))
		align = sizeof(short);
	else
		return ptr;

	r = size % align;

	if (r == 0)
		return ptr;

	return (void *)(((unsigned long)ptr) + align - r);
}
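/*
 * Worked example (illustrative only, assuming a 64-bit build where
 * sizeof(long) == 8): for an item of size 6 the chain above picks
 * align = sizeof(long), r = 6 % 8 = 6, and the returned pointer is the
 * input advanced by align - r = 2 bytes.  When the size is already a
 * multiple of the chosen alignment (say 16), r == 0 and the pointer is
 * returned untouched.  edac_mc_alloc() below relies on this helper to lay
 * out the csrow, channel and private-data areas back to back in a single
 * allocation.
 */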
/**
 * edac_mc_alloc: Allocate a struct mem_ctl_info structure
 * @sz_pvt:	size of private storage needed
 * @nr_csrows:	Number of CSROWS needed for this MC
 * @nr_chans:	Number of channels for the MC
 *
 * Everything is kmalloc'ed as one big chunk - more efficient.
 * It can only be used if all structures have the same lifetime - otherwise
 * you have to allocate and initialize your own structures.
 *
 * Use edac_mc_free() to free mc structures allocated by this function.
 *
 * Returns:
 *	NULL	allocation failed
 *	struct mem_ctl_info pointer
 */
struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
				unsigned nr_chans, int edac_index)
{
	struct mem_ctl_info *mci;
	struct csrow_info *csi, *csrow;
	struct channel_info *chi, *chp, *chan;
	void *pvt;
	unsigned size;
	int row, chn;
	int err;

	/* Figure out the offsets of the various items from the start of an mc
	 * structure.  We want the alignment of each item to be at least as
	 * stringent as what the compiler would provide if we could simply
	 * hardcode everything into a single struct.
	 */
	mci = (struct mem_ctl_info *)0;
	csi = edac_align_ptr(&mci[1], sizeof(*csi));
	chi = edac_align_ptr(&csi[nr_csrows], sizeof(*chi));
	pvt = edac_align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
	size = ((unsigned long)pvt) + sz_pvt;

	mci = kzalloc(size, GFP_KERNEL);
	if (mci == NULL)
		return NULL;

	/* Adjust pointers so they point within the memory we just allocated
	 * rather than an imaginary chunk of memory located at address 0.
	 */
	csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
	chi = (struct channel_info *)(((char *)mci) + ((unsigned long)chi));
	pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;

	/* setup index and various internal pointers */
	mci->mc_idx = edac_index;
	mci->csrows = csi;
	mci->pvt_info = pvt;
	mci->nr_csrows = nr_csrows;

	for (row = 0; row < nr_csrows; row++) {
		csrow = &csi[row];
		csrow->csrow_idx = row;
		csrow->mci = mci;
		csrow->nr_channels = nr_chans;
		chp = &chi[row * nr_chans];
		csrow->channels = chp;

		for (chn = 0; chn < nr_chans; chn++) {
			chan = &chp[chn];
			chan->chan_idx = chn;
			chan->csrow = csrow;
		}
	}

	mci->op_state = OP_ALLOC;

	/*
	 * Initialize the 'root' kobj for the edac_mc controller
	 */
	err = edac_mc_register_sysfs_main_kobj(mci);
	if (err) {
		kfree(mci);
		return NULL;
	}

	/* at this point, the root kobj is valid, and in order to
	 * 'free' the object, then the function:
	 *	edac_mc_unregister_sysfs_main_kobj() must be called
	 * which will perform kobj unregistration and the actual free
	 * will occur during the kobject callback operation
	 */
	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);
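/*
 * Typical allocation by a low-level driver (an illustrative sketch; the
 * "foo" names, the private struct and the controller index 0 are
 * hypothetical, not part of this file):
 *
 *	struct foo_pvt { u32 saved_err_reg; };
 *	struct mem_ctl_info *mci;
 *
 *	mci = edac_mc_alloc(sizeof(struct foo_pvt), nr_csrows, nr_chans, 0);
 *	if (mci == NULL)
 *		return -ENOMEM;
 *	pvt = mci->pvt_info;	(points into the same kzalloc'ed chunk)
 *
 * The csrow array, channel array and private data all live inside the one
 * allocation laid out by edac_align_ptr() above, so the whole controller
 * is released together when the main kobject is finally dropped via
 * edac_mc_free().
 */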
/**
 * edac_mc_free
 *	'Free' a previously allocated 'mci' structure
 * @mci: pointer to a struct mem_ctl_info structure
 */
void edac_mc_free(struct mem_ctl_info *mci)
{
	edac_mc_unregister_sysfs_main_kobj(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);
/*
 * find_mci_by_dev
 *
 *	scan list of controllers looking for the one that manages
 *	the 'dev' device
 */
static struct mem_ctl_info *find_mci_by_dev(struct device *dev)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	debugf3("%s()\n", __func__);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->dev == dev)
			return mci;
	}

	return NULL;
}
/*
 * handler for EDAC to check if NMI type handler has asserted interrupt
 */
static int edac_mc_assert_error_check_and_clear(void)
{
	int old_state;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		return 1;

	old_state = edac_err_assert;
	edac_err_assert = 0;

	return old_state;
}
/*
 * edac_mc_workq_function
 *	performs the operation scheduled by a workq request
 */
static void edac_mc_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = (struct delayed_work *)work_req;
	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);

	mutex_lock(&mem_ctls_mutex);

	/* if this control struct has moved to offline state, we are done */
	if (mci->op_state == OP_OFFLINE) {
		mutex_unlock(&mem_ctls_mutex);
		return;
	}

	/* Only poll controllers that are running polled and have a check */
	if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
		mci->edac_check(mci);

	mutex_unlock(&mem_ctls_mutex);

	/* Reschedule */
	queue_delayed_work(edac_workqueue, &mci->work,
			msecs_to_jiffies(edac_mc_get_poll_msec()));
}
/*
 * edac_mc_workq_setup
 *	initialize a workq item for this mci
 *	passing in the new delay period in msec
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex held
 */
static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
{
	debugf0("%s()\n", __func__);

	/* if this instance is not in the POLL state, then simply return */
	if (mci->op_state != OP_RUNNING_POLL)
		return;

	INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
	queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
}
/*
 * edac_mc_workq_teardown
 *	stop the workq processing on this mci
 *
 *	locking model:
 *
 *		called WITHOUT lock held
 */
static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
{
	int status;

	status = cancel_delayed_work(&mci->work);
	if (status == 0) {
		debugf0("%s() not canceled, flush the queue\n",
			__func__);

		/* workq instance might be running, wait for it */
		flush_workqueue(edac_workqueue);
	}
}
/*
 * edac_mc_reset_delay_period(unsigned long value)
 *
 *	user space has updated our poll period value, need to
 *	reset our workq delays
 */
void edac_mc_reset_delay_period(int value)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	mutex_lock(&mem_ctls_mutex);

	/* scan the list and turn off all workq timers, doing so under lock
	 */
	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->op_state == OP_RUNNING_POLL)
			cancel_delayed_work(&mci->work);
	}

	mutex_unlock(&mem_ctls_mutex);

	/* re-walk the list, and reset the poll delay */
	mutex_lock(&mem_ctls_mutex);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		edac_mc_workq_setup(mci, (unsigned long) value);
	}

	mutex_unlock(&mem_ctls_mutex);
}
/* Return 0 on success, 1 on failure.
 * Before calling this function, caller must
 * assign a unique value to mci->mc_idx.
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex lock held
 */
static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
	struct list_head *item, *insert_before;
	struct mem_ctl_info *p;

	insert_before = &mc_devices;

	p = find_mci_by_dev(mci->dev);
	if (unlikely(p != NULL))
		goto fail0;

	list_for_each(item, &mc_devices) {
		p = list_entry(item, struct mem_ctl_info, link);

		if (p->mc_idx >= mci->mc_idx) {
			if (unlikely(p->mc_idx == mci->mc_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	list_add_tail_rcu(&mci->link, insert_before);
	atomic_inc(&edac_handlers);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
		"%s (%s) %s %s already assigned %d\n", p->dev->bus_id,
		dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
		"bug in low-level driver: attempt to assign\n"
		"    duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
	return 1;
}
static void complete_mc_list_del(struct rcu_head *head)
{
	struct mem_ctl_info *mci;

	mci = container_of(head, struct mem_ctl_info, rcu);
	INIT_LIST_HEAD(&mci->link);
	complete(&mci->complete);
}
static void del_mc_from_global_list(struct mem_ctl_info *mci)
{
	atomic_dec(&edac_handlers);
	list_del_rcu(&mci->link);
	init_completion(&mci->complete);
	call_rcu(&mci->rcu, complete_mc_list_del);
	wait_for_completion(&mci->complete);
}
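/*
 * Note on the teardown ordering above: list_del_rcu() unlinks the mci while
 * lockless readers may still be walking mc_devices under rcu_read_lock().
 * call_rcu() defers complete_mc_list_del() until a grace period has passed,
 * and wait_for_completion() blocks the caller until then, so the mci cannot
 * be reused or freed while such a reader can still see it.
 */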
/**
 * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
 *
 * If found, return a pointer to the structure.
 * Else return NULL.
 *
 * Caller must hold mem_ctls_mutex.
 */
struct mem_ctl_info *edac_mc_find(int idx)
{
	struct list_head *item;
	struct mem_ctl_info *mci;

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->mc_idx >= idx) {
			if (mci->mc_idx == idx)
				return mci;

			break;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(edac_mc_find);
/**
 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
 *	create sysfs entries associated with mci structure
 * @mci: pointer to the mci structure to be added to the list
 * @mc_idx: A unique numeric identifier to be assigned to the 'mci' structure.
 */

/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc(struct mem_ctl_info *mci)
{
	debugf0("%s()\n", __func__);

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_mc_dump_mci(mci);

	if (edac_debug_level >= 4) {
		int i;

		for (i = 0; i < mci->nr_csrows; i++) {
			int j;

			edac_mc_dump_csrow(&mci->csrows[i]);
			for (j = 0; j < mci->csrows[i].nr_channels; j++)
				edac_mc_dump_channel(&mci->csrows[i].
						channels[j]);
		}
	}
#endif
	mutex_lock(&mem_ctls_mutex);

	if (add_mc_to_global_list(mci))
		goto fail0;

	/* set load time so that error rate can be tracked */
	mci->start_time = jiffies;

	if (edac_create_sysfs_mci_device(mci)) {
		edac_mc_printk(mci, KERN_WARNING,
			"failed to create sysfs device\n");
		goto fail1;
	}

	/* If there IS a check routine, then we are running POLLED */
	if (mci->edac_check != NULL) {
		/* This instance is NOW RUNNING */
		mci->op_state = OP_RUNNING_POLL;

		edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
	} else {
		mci->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
		" DEV %s\n", mci->mod_name, mci->ctl_name, dev_name(mci));

	mutex_unlock(&mem_ctls_mutex);
	return 0;

fail1:
	del_mc_from_global_list(mci);

fail0:
	mutex_unlock(&mem_ctls_mutex);
	return 1;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc);
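/*
 * Typical probe-time flow in a low-level driver (an illustrative sketch;
 * foo_check() and the "foo" names are hypothetical):
 *
 *	mci->dev = &pdev->dev;
 *	mci->mod_name = "foo_edac";
 *	mci->ctl_name = "foo";
 *	mci->edac_check = foo_check;	(only for polled operation)
 *	... fill in mtype_cap, edac_ctl_cap, csrow and channel info ...
 *
 *	if (edac_mc_add_mc(mci)) {
 *		edac_mc_free(mci);
 *		return -ENODEV;
 *	}
 *
 * Because edac_check is non-NULL in this sketch, the controller enters
 * OP_RUNNING_POLL and foo_check() is then called from the workqueue every
 * edac_mc_get_poll_msec() milliseconds.
 */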
/**
 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
 *	remove mci structure from global list
 * @dev: Pointer to 'struct device' representing mci structure to remove.
 *
 * Return pointer to removed mci structure, or NULL if device not found.
 */
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
	struct mem_ctl_info *mci;

	debugf0("%s()\n", __func__);

	mutex_lock(&mem_ctls_mutex);

	/* find the requested mci struct in the global list */
	mci = find_mci_by_dev(dev);
	if (mci == NULL) {
		mutex_unlock(&mem_ctls_mutex);
		return NULL;
	}

	/* marking MCI offline */
	mci->op_state = OP_OFFLINE;

	del_mc_from_global_list(mci);
	mutex_unlock(&mem_ctls_mutex);

	/* flush workq processes and remove sysfs */
	edac_mc_workq_teardown(mci);
	edac_remove_sysfs_mci_device(mci);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
		mci->mod_name, mci->ctl_name, dev_name(mci));

	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);
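/*
 * Typical remove-time flow (illustrative sketch, assuming the controller
 * was registered with mci->dev = &pdev->dev as in the probe sketch above):
 *
 *	struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
 *
 *	if (mci)
 *		edac_mc_free(mci);
 *
 * edac_mc_del_mc() stops the polling work and removes the sysfs entries;
 * edac_mc_free() then drops the main kobject, whose release callback frees
 * the structure.
 */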
static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
				u32 size)
{
	struct page *pg;
	void *virt_addr;
	unsigned long flags = 0;

	debugf3("%s()\n", __func__);

	/* ECC error page was not in our memory. Ignore it. */
	if (!pfn_valid(page))
		return;

	/* Find the actual page structure then map it and fix */
	pg = pfn_to_page(page);

	if (PageHighMem(pg))
		local_irq_save(flags);

	virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);

	/* Perform architecture specific atomic scrub operation */
	atomic_scrub(virt_addr + offset, size);

	/* Unmap and complete */
	kunmap_atomic(virt_addr, KM_BOUNCE_READ);

	if (PageHighMem(pg))
		local_irq_restore(flags);
}
/* FIXME - should return -1 */
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
	struct csrow_info *csrows = mci->csrows;
	int row, i;

	debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
	row = -1;

	for (i = 0; i < mci->nr_csrows; i++) {
		struct csrow_info *csrow = &csrows[i];

		if (csrow->nr_pages == 0)
			continue;

		debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
			"mask(0x%lx)\n", mci->mc_idx, __func__,
			csrow->first_page, page, csrow->last_page,
			csrow->page_mask);

		if ((page >= csrow->first_page) &&
			(page <= csrow->last_page) &&
			((page & csrow->page_mask) ==
			(csrow->first_page & csrow->page_mask))) {
			row = i;
			break;
		}
	}

	if (row == -1)
		edac_mc_printk(mci, KERN_ERR,
			"could not look up page error address %lx\n",
			(unsigned long)page);

	return row;
}
EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
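/*
 * Illustrative use (sketch; error_addr and syndrome stand for values a
 * driver decodes from its own hardware): convert the error address to a
 * page frame number, look up the csrow, then report the error:
 *
 *	unsigned long pfn = error_addr >> PAGE_SHIFT;
 *	unsigned long offset = error_addr & ~PAGE_MASK;
 *	int row = edac_mc_find_csrow_by_page(mci, pfn);
 *
 *	if (row >= 0)
 *		edac_mc_handle_ce(mci, pfn, offset, syndrome, row, 0, "CE");
 *
 * Channel 0 is only a placeholder here; a real driver passes the channel
 * it decoded.
 */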
/* FIXME - settable log (warning/emerg) levels */
/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
void edac_mc_handle_ce(struct mem_ctl_info *mci,
		unsigned long page_frame_number,
		unsigned long offset_in_page, unsigned long syndrome,
		int row, int channel, const char *msg)
{
	unsigned long remapped_page;

	debugf3("MC%d: %s()\n", mci->mc_idx, __func__);

	/* FIXME - maybe make panic on INTERNAL ERROR an option */
	if (row >= mci->nr_csrows || row < 0) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: row out of range "
			"(%d >= %d)\n", row, mci->nr_csrows);
		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (channel >= mci->csrows[row].nr_channels || channel < 0) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: channel out of range "
			"(%d >= %d)\n", channel,
			mci->csrows[row].nr_channels);
		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (edac_mc_get_log_ce())
		/* FIXME - put in DIMM location */
		edac_mc_printk(mci, KERN_WARNING,
			"CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
			"0x%lx, row %d, channel %d, label \"%s\": %s\n",
			page_frame_number, offset_in_page,
			mci->csrows[row].grain, syndrome, row, channel,
			mci->csrows[row].channels[channel].label, msg);

	mci->ce_count++;
	mci->csrows[row].ce_count++;
	mci->csrows[row].channels[channel].ce_count++;

	if (mci->scrub_mode & SCRUB_SW_SRC) {
		/*
		 * Some MC's can remap memory so that it is still available
		 * at a different address when PCI devices map into memory.
		 * MC's that can't do this lose the memory where PCI devices
		 * are mapped.  This mapping is MC dependent and so we call
		 * back into the MC driver for it to map the MC page to
		 * a physical (CPU) page which can then be mapped to a virtual
		 * page - which can then be scrubbed.
		 */
		remapped_page = mci->ctl_page_to_phys ?
			mci->ctl_page_to_phys(mci, page_frame_number) :
			page_frame_number;

		edac_mc_scrub_block(remapped_page, offset_in_page,
				mci->csrows[row].grain);
	}
}
EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
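/*
 * Sketch of a polled check routine built on the calls above (the register
 * reads are hypothetical; only the reporting calls are from this file):
 *
 *	static void foo_check(struct mem_ctl_info *mci)
 *	{
 *		... read the controller's error logging registers ...
 *		if (a correctable error was latched with a valid address)
 *			edac_mc_handle_ce(mci, pfn, offset, syndrome,
 *					row, channel, "foo CE");
 *		else if (an error was seen but no address was captured)
 *			edac_mc_handle_ce_no_info(mci, "foo CE - no info");
 *	}
 *
 * Assigning foo_check to mci->edac_check before edac_mc_add_mc() makes the
 * workqueue in this file invoke it periodically.
 */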
void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
{
	if (edac_mc_get_log_ce())
		edac_mc_printk(mci, KERN_WARNING,
			"CE - no information available: %s\n", msg);

	mci->ce_noinfo_count++;
	mci->ce_count++;
}
EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
void edac_mc_handle_ue(struct mem_ctl_info *mci,
		unsigned long page_frame_number,
		unsigned long offset_in_page, int row, const char *msg)
{
	int len = EDAC_MC_LABEL_LEN * 4;
	char labels[len + 1];
	char *pos = labels;
	int chan;
	int chars;

	debugf3("MC%d: %s()\n", mci->mc_idx, __func__);

	/* FIXME - maybe make panic on INTERNAL ERROR an option */
	if (row >= mci->nr_csrows || row < 0) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: row out of range "
			"(%d >= %d)\n", row, mci->nr_csrows);
		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
		return;
	}

	chars = snprintf(pos, len + 1, "%s",
			mci->csrows[row].channels[0].label);
	len -= chars;
	pos += chars;

	for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
	     chan++) {
		chars = snprintf(pos, len + 1, ":%s",
				mci->csrows[row].channels[chan].label);
		len -= chars;
		pos += chars;
	}

	if (edac_mc_get_log_ue())
		edac_mc_printk(mci, KERN_EMERG,
			"UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
			"labels \"%s\": %s\n", page_frame_number,
			offset_in_page, mci->csrows[row].grain, row,
			labels, msg);

	if (edac_mc_get_panic_on_ue())
		panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
			"row %d, labels \"%s\": %s\n", mci->mc_idx,
			page_frame_number, offset_in_page,
			mci->csrows[row].grain, row, labels, msg);

	mci->ue_count++;
	mci->csrows[row].ue_count++;
}
EXPORT_SYMBOL_GPL(edac_mc_handle_ue);
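/*
 * The label loop above concatenates the labels of every channel in the
 * failing row, so a two-channel csrow whose DIMMs are labelled, for
 * example, "DIMM_A0" and "DIMM_B0" is reported with labels
 * "DIMM_A0:DIMM_B0" in the UE message.
 */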
void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
{
	if (edac_mc_get_panic_on_ue())
		panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);

	if (edac_mc_get_log_ue())
		edac_mc_printk(mci, KERN_WARNING,
			"UE - no information available: %s\n", msg);

	mci->ue_noinfo_count++;
	mci->ue_count++;
}
EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
/*************************************************************
 * On Fully Buffered DIMM modules, this helper function is
 * called to process UE events
 */
void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci,
			unsigned int csrow,
			unsigned int channela,
			unsigned int channelb, char *msg)
{
	int len = EDAC_MC_LABEL_LEN * 4;
	char labels[len + 1];
	char *pos = labels;
	int chars;

	if (csrow >= mci->nr_csrows) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: row out of range (%d >= %d)\n",
			csrow, mci->nr_csrows);
		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (channela >= mci->csrows[csrow].nr_channels) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: channel-a out of range "
			"(%d >= %d)\n",
			channela, mci->csrows[csrow].nr_channels);
		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (channelb >= mci->csrows[csrow].nr_channels) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: channel-b out of range "
			"(%d >= %d)\n",
			channelb, mci->csrows[csrow].nr_channels);
		edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
		return;
	}

	mci->ue_count++;
	mci->csrows[csrow].ue_count++;

	/* Generate the DIMM labels from the specified channels */
	chars = snprintf(pos, len + 1, "%s",
			mci->csrows[csrow].channels[channela].label);
	len -= chars;
	pos += chars;
	chars = snprintf(pos, len + 1, "-%s",
			mci->csrows[csrow].channels[channelb].label);

	if (edac_mc_get_log_ue())
		edac_mc_printk(mci, KERN_EMERG,
			"UE row %d, channel-a= %d channel-b= %d "
			"labels \"%s\": %s\n", csrow, channela, channelb,
			labels, msg);

	if (edac_mc_get_panic_on_ue())
		panic("UE row %d, channel-a= %d channel-b= %d "
			"labels \"%s\": %s\n", csrow, channela,
			channelb, labels, msg);
}
EXPORT_SYMBOL(edac_mc_handle_fbd_ue);
/*************************************************************
 * On Fully Buffered DIMM modules, this helper function is
 * called to process CE events
 */
void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
			unsigned int csrow, unsigned int channel, char *msg)
{
	/* Ensure boundary values */
	if (csrow >= mci->nr_csrows) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: row out of range (%d >= %d)\n",
			csrow, mci->nr_csrows);
		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (channel >= mci->csrows[csrow].nr_channels) {
		/* something is wrong */
		edac_mc_printk(mci, KERN_ERR,
			"INTERNAL ERROR: channel out of range (%d >= %d)\n",
			channel, mci->csrows[csrow].nr_channels);
		edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
		return;
	}

	if (edac_mc_get_log_ce())
		/* FIXME - put in DIMM location */
		edac_mc_printk(mci, KERN_WARNING,
			"CE row %d, channel %d, label \"%s\": %s\n",
			csrow, channel,
			mci->csrows[csrow].channels[channel].label, msg);

	mci->ce_count++;
	mci->csrows[csrow].ce_count++;
	mci->csrows[csrow].channels[channel].ce_count++;
}
EXPORT_SYMBOL(edac_mc_handle_fbd_ce);