x86/uv: Fix uninitialized spinlocks
arch/x86/platform/uv/tlb_uv.c
blob 81aee5abd42c5cb511472cb07f6d0b968b7d6fc3
1 /*
2 * SGI UltraViolet TLB flush routines.
4 * (c) 2008-2011 Cliff Wickman <cpw@sgi.com>, SGI.
6 * This code is released under the GNU General Public License version 2 or
7 * later.
8 */
9 #include <linux/seq_file.h>
10 #include <linux/proc_fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/delay.h>
16 #include <asm/mmu_context.h>
17 #include <asm/uv/uv.h>
18 #include <asm/uv/uv_mmrs.h>
19 #include <asm/uv/uv_hub.h>
20 #include <asm/uv/uv_bau.h>
21 #include <asm/apic.h>
22 #include <asm/idle.h>
23 #include <asm/tsc.h>
24 #include <asm/irq_vectors.h>
25 #include <asm/timer.h>
27 /* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
28 static int timeout_base_ns[] = {
29 20,
30 160,
31 1280,
32 10240,
33 81920,
34 655360,
35 5242880,
36 167772160
39 static int timeout_us;
40 static int nobau;
41 static int baudisabled;
42 static spinlock_t disable_lock;
43 static cycles_t congested_cycles;
45 /* tunables: */
46 static int max_concurr = MAX_BAU_CONCURRENT;
47 static int max_concurr_const = MAX_BAU_CONCURRENT;
48 static int plugged_delay = PLUGGED_DELAY;
49 static int plugsb4reset = PLUGSB4RESET;
50 static int timeoutsb4reset = TIMEOUTSB4RESET;
51 static int ipi_reset_limit = IPI_RESET_LIMIT;
52 static int complete_threshold = COMPLETE_THRESHOLD;
53 static int congested_respns_us = CONGESTED_RESPONSE_US;
54 static int congested_reps = CONGESTED_REPS;
55 static int congested_period = CONGESTED_PERIOD;
57 static struct tunables tunables[] = {
58 {&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
59 {&plugged_delay, PLUGGED_DELAY},
60 {&plugsb4reset, PLUGSB4RESET},
61 {&timeoutsb4reset, TIMEOUTSB4RESET},
62 {&ipi_reset_limit, IPI_RESET_LIMIT},
63 {&complete_threshold, COMPLETE_THRESHOLD},
64 {&congested_respns_us, CONGESTED_RESPONSE_US},
65 {&congested_reps, CONGESTED_REPS},
66 {&congested_period, CONGESTED_PERIOD}
69 static struct dentry *tunables_dir;
70 static struct dentry *tunables_file;
72 /* these correspond to the statistics printed by ptc_seq_show() */
73 static char *stat_description[] = {
74 "sent: number of shootdown messages sent",
75 "stime: time spent sending messages",
76 "numuvhubs: number of hubs targeted with shootdown",
77 "numuvhubs16: number times 16 or more hubs targeted",
78 "numuvhubs8: number times 8 or more hubs targeted",
79 "numuvhubs4: number times 4 or more hubs targeted",
80 "numuvhubs2: number times 2 or more hubs targeted",
81 "numuvhubs1: number times 1 hub targeted",
82 "numcpus: number of cpus targeted with shootdown",
83 "dto: number of destination timeouts",
84 "retries: destination timeout retries sent",
85 "rok: : destination timeouts successfully retried",
86 "resetp: ipi-style resource resets for plugs",
87 "resett: ipi-style resource resets for timeouts",
88 "giveup: fall-backs to ipi-style shootdowns",
89 "sto: number of source timeouts",
90 "bz: number of stay-busy's",
91 "throt: number times spun in throttle",
92 "swack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
93 "recv: shootdown messages received",
94 "rtime: time spent processing messages",
95 "all: shootdown all-tlb messages",
96 "one: shootdown one-tlb messages",
97 "mult: interrupts that found multiple messages",
98 "none: interrupts that found no messages",
99 "retry: number of retry messages processed",
100 "canc: number messages canceled by retries",
101 "nocan: number retries that found nothing to cancel",
102 "reset: number of ipi-style reset requests processed",
103 "rcan: number messages canceled by reset requests",
104 "disable: number times use of the BAU was disabled",
105 "enable: number times use of the BAU was re-enabled"
108 static int __init
109 setup_nobau(char *arg)
111 nobau = 1;
112 return 0;
114 early_param("nobau", setup_nobau);
116 /* base pnode in this partition */
117 static int uv_base_pnode __read_mostly;
119 static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
120 static DEFINE_PER_CPU(struct bau_control, bau_control);
121 static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
124 * Determine the first node on a uvhub. 'Nodes' are used for kernel
125 * memory allocation.
127 static int __init uvhub_to_first_node(int uvhub)
129 int node, b;
131 for_each_online_node(node) {
132 b = uv_node_to_blade_id(node);
133 if (uvhub == b)
134 return node;
136 return -1;
140 * Determine the apicid of the first cpu on a uvhub.
142 static int __init uvhub_to_first_apicid(int uvhub)
144 int cpu;
146 for_each_present_cpu(cpu)
147 if (uvhub == uv_cpu_to_blade_id(cpu))
148 return per_cpu(x86_cpu_to_apicid, cpu);
149 return -1;
153 * Free a software acknowledge hardware resource by clearing its Pending
154 * bit. This will return a reply to the sender.
155 * If the message has timed out, a reply has already been sent by the
156 * hardware but the resource has not been released. In that case our
157 * clear of the Timeout bit (as well) will free the resource. No reply will
158 * be sent (the hardware will only do one reply per message).
160 static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
161 int do_acknowledge)
163 unsigned long dw;
164 struct bau_pq_entry *msg;
166 msg = mdp->msg;
167 if (!msg->canceled && do_acknowledge) {
168 dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
169 write_mmr_sw_ack(dw);
171 msg->replied_to = 1;
172 msg->swack_vec = 0;
176 * Process the receipt of a RETRY message
178 static void bau_process_retry_msg(struct msg_desc *mdp,
179 struct bau_control *bcp)
181 int i;
182 int cancel_count = 0;
183 unsigned long msg_res;
184 unsigned long mmr = 0;
185 struct bau_pq_entry *msg = mdp->msg;
186 struct bau_pq_entry *msg2;
187 struct ptc_stats *stat = bcp->statp;
189 stat->d_retries++;
191 * cancel any message from msg+1 to the retry itself
193 for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
194 if (msg2 > mdp->queue_last)
195 msg2 = mdp->queue_first;
196 if (msg2 == msg)
197 break;
199 /* same conditions for cancellation as do_reset */
200 if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
201 (msg2->swack_vec) && ((msg2->swack_vec &
202 msg->swack_vec) == 0) &&
203 (msg2->sending_cpu == msg->sending_cpu) &&
204 (msg2->msg_type != MSG_NOOP)) {
205 mmr = read_mmr_sw_ack();
206 msg_res = msg2->swack_vec;
208 * This is a message retry; clear the resources held
209 * by the previous message only if they timed out.
210 * If it has not timed out we have an unexpected
211 * situation to report.
213 if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
214 unsigned long mr;
216 * Is the resource timed out?
217 * Make everyone ignore the cancelled message.
219 msg2->canceled = 1;
220 stat->d_canceled++;
221 cancel_count++;
222 mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
223 write_mmr_sw_ack(mr);
227 if (!cancel_count)
228 stat->d_nocanceled++;
232 * Do all the things a cpu should do for a TLB shootdown message.
233 * Other cpu's may come here at the same time for this message.
235 static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
236 int do_acknowledge)
238 short socket_ack_count = 0;
239 short *sp;
240 struct atomic_short *asp;
241 struct ptc_stats *stat = bcp->statp;
242 struct bau_pq_entry *msg = mdp->msg;
243 struct bau_control *smaster = bcp->socket_master;
246 * This must be a normal message, or retry of a normal message
248 if (msg->address == TLB_FLUSH_ALL) {
249 local_flush_tlb();
250 stat->d_alltlb++;
251 } else {
252 __flush_tlb_one(msg->address);
253 stat->d_onetlb++;
255 stat->d_requestee++;
258 * One cpu on each uvhub has the additional job on a RETRY
259 * of releasing the resource held by the message that is
260 * being retried. That message is identified by sending
261 * cpu number.
263 if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
264 bau_process_retry_msg(mdp, bcp);
267 * This is a swack message, so we have to reply to it.
268 * Count each responding cpu on the socket. This avoids
269 * pinging the count's cache line back and forth between
270 * the sockets.
272 sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
273 asp = (struct atomic_short *)sp;
274 socket_ack_count = atom_asr(1, asp);
275 if (socket_ack_count == bcp->cpus_in_socket) {
276 int msg_ack_count;
278 * Both sockets dump their completed count total into
279 * the message's count.
281 smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
282 asp = (struct atomic_short *)&msg->acknowledge_count;
283 msg_ack_count = atom_asr(socket_ack_count, asp);
285 if (msg_ack_count == bcp->cpus_in_uvhub) {
287 * All cpus in uvhub saw it; reply
288 * (unless we are in the UV2 workaround)
290 reply_to_message(mdp, bcp, do_acknowledge);
294 return;
298 * Determine the first cpu on a pnode.
300 static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)
302 int cpu;
303 struct hub_and_pnode *hpp;
305 for_each_present_cpu(cpu) {
306 hpp = &smaster->thp[cpu];
307 if (pnode == hpp->pnode)
308 return cpu;
310 return -1;
314 * Last resort when we get a large number of destination timeouts is
315 * to clear resources held by a given cpu.
316 * Do this with IPI so that all messages in the BAU message queue
317 * can be identified by their nonzero swack_vec field.
319 * This is entered for a single cpu on the uvhub.
320 * The sender wants this uvhub to free a specific message's
321 * swack resources.
323 static void do_reset(void *ptr)
325 int i;
326 struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
327 struct reset_args *rap = (struct reset_args *)ptr;
328 struct bau_pq_entry *msg;
329 struct ptc_stats *stat = bcp->statp;
331 stat->d_resets++;
333 * We're looking for the given sender, and
334 * will free its swack resource.
335 * If all cpu's finally responded after the timeout, its
336 * message 'replied_to' was set.
338 for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
339 unsigned long msg_res;
340 /* do_reset: same conditions for cancellation as
341 bau_process_retry_msg() */
342 if ((msg->replied_to == 0) &&
343 (msg->canceled == 0) &&
344 (msg->sending_cpu == rap->sender) &&
345 (msg->swack_vec) &&
346 (msg->msg_type != MSG_NOOP)) {
347 unsigned long mmr;
348 unsigned long mr;
350 * make everyone else ignore this message
352 msg->canceled = 1;
354 * only reset the resource if it is still pending
356 mmr = read_mmr_sw_ack();
357 msg_res = msg->swack_vec;
358 mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
359 if (mmr & msg_res) {
360 stat->d_rcanceled++;
361 write_mmr_sw_ack(mr);
365 return;
369 * Use IPI to get all target uvhubs to release resources held by
370 * a given sending cpu number.
372 static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
374 int pnode;
375 int apnode;
376 int maskbits;
377 int sender = bcp->cpu;
378 cpumask_t *mask = bcp->uvhub_master->cpumask;
379 struct bau_control *smaster = bcp->socket_master;
380 struct reset_args reset_args;
382 reset_args.sender = sender;
383 cpus_clear(*mask);
384 /* find a single cpu for each uvhub in this distribution mask */
385 maskbits = sizeof(struct pnmask) * BITSPERBYTE;
386 /* each bit is a pnode relative to the partition base pnode */
387 for (pnode = 0; pnode < maskbits; pnode++) {
388 int cpu;
389 if (!bau_uvhub_isset(pnode, distribution))
390 continue;
391 apnode = pnode + bcp->partition_base_pnode;
392 cpu = pnode_to_first_cpu(apnode, smaster);
393 cpu_set(cpu, *mask);
396 /* IPI all cpus; preemption is already disabled */
397 smp_call_function_many(mask, do_reset, (void *)&reset_args, 1);
398 return;
401 static inline unsigned long cycles_2_us(unsigned long long cyc)
403 unsigned long long ns;
404 unsigned long us;
405 int cpu = smp_processor_id();
407 ns = (cyc * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR;
408 us = ns / 1000;
409 return us;
413 * wait for all cpus on this hub to finish their sends and go quiet
414 * leaves uvhub_quiesce set so that no new broadcasts are started by
415 * uv_flush_send_and_wait()
417 static inline void quiesce_local_uvhub(struct bau_control *hmaster)
419 atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
423 * mark this quiet-requestor as done
425 static inline void end_uvhub_quiesce(struct bau_control *hmaster)
427 atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
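/*
 * Read one descriptor's 2-bit activation status from the given UV1
 * activation status MMR.
 */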
430 static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
432 unsigned long descriptor_status;
434 descriptor_status = uv_read_local_mmr(mmr_offset);
435 descriptor_status >>= right_shift;
436 descriptor_status &= UV_ACT_STATUS_MASK;
437 return descriptor_status;
441 * Wait for completion of a broadcast software ack message
442 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
444 static int uv1_wait_completion(struct bau_desc *bau_desc,
445 unsigned long mmr_offset, int right_shift,
446 struct bau_control *bcp, long try)
448 unsigned long descriptor_status;
449 cycles_t ttm;
450 struct ptc_stats *stat = bcp->statp;
452 descriptor_status = uv1_read_status(mmr_offset, right_shift);
453 /* spin on the status MMR, waiting for it to go idle */
454 while ((descriptor_status != DS_IDLE)) {
456 * Our software ack messages may be blocked because
457 * there are no swack resources available. As long
458 * as none of them has timed out hardware will NACK
459 * our message and its state will stay IDLE.
461 if (descriptor_status == DS_SOURCE_TIMEOUT) {
462 stat->s_stimeout++;
463 return FLUSH_GIVEUP;
464 } else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
465 stat->s_dtimeout++;
466 ttm = get_cycles();
469 * Our retries may be blocked by all destination
470 * swack resources being consumed, and a timeout
471 * pending. In that case hardware returns the
472 * ERROR that looks like a destination timeout.
474 if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
475 bcp->conseccompletes = 0;
476 return FLUSH_RETRY_PLUGGED;
479 bcp->conseccompletes = 0;
480 return FLUSH_RETRY_TIMEOUT;
481 } else {
483 * descriptor_status is still BUSY
485 cpu_relax();
487 descriptor_status = uv1_read_status(mmr_offset, right_shift);
489 bcp->conseccompletes++;
490 return FLUSH_COMPLETE;
494 * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
496 static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc)
498 unsigned long descriptor_status;
499 unsigned long descriptor_status2;
501 descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
502 descriptor_status2 = (read_mmr_uv2_status() >> desc) & 0x1UL;
503 descriptor_status = (descriptor_status << 1) | descriptor_status2;
504 return descriptor_status;
508 * Return whether the status of the descriptor that is normally used for this
509 * cpu (the one indexed by its hub-relative cpu number) is busy.
510 * The status of the original 32 descriptors is always reflected in the 64
511 * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
512 * The bit provided by the activation_status_2 register is irrelevant to
513 * the status if it is only being tested for busy or not busy.
515 int normal_busy(struct bau_control *bcp)
517 int cpu = bcp->uvhub_cpu;
518 int mmr_offset;
519 int right_shift;
521 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
522 right_shift = cpu * UV_ACT_STATUS_SIZE;
523 return (((((read_lmmr(mmr_offset) >> right_shift) &
524 UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
528 * Entered when a bau descriptor has gone into a permanent busy wait because
529 * of a hardware bug.
530 * Workaround the bug.
532 int handle_uv2_busy(struct bau_control *bcp)
534 int busy_one = bcp->using_desc;
535 int normal = bcp->uvhub_cpu;
536 int selected = -1;
537 int i;
538 unsigned long descriptor_status;
539 unsigned long status;
540 int mmr_offset;
541 struct bau_desc *bau_desc_old;
542 struct bau_desc *bau_desc_new;
543 struct bau_control *hmaster = bcp->uvhub_master;
544 struct ptc_stats *stat = bcp->statp;
545 cycles_t ttm;
547 stat->s_uv2_wars++;
548 spin_lock(&hmaster->uvhub_lock);
549 /* try for the original first */
550 if (busy_one != normal) {
551 if (!normal_busy(bcp))
552 selected = normal;
554 if (selected < 0) {
555 /* can't use the normal, select an alternate */
556 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
557 descriptor_status = read_lmmr(mmr_offset);
559 /* scan available descriptors 32-63 */
560 for (i = 0; i < UV_CPUS_PER_AS; i++) {
561 if ((hmaster->inuse_map & (1 << i)) == 0) {
562 status = ((descriptor_status >>
563 (i * UV_ACT_STATUS_SIZE)) &
564 UV_ACT_STATUS_MASK) << 1;
565 if (status != UV2H_DESC_BUSY) {
566 selected = i + UV_CPUS_PER_AS;
567 break;
573 if (busy_one != normal)
574 /* mark the busy alternate as not in-use */
575 hmaster->inuse_map &= ~(1 << (busy_one - UV_CPUS_PER_AS));
577 if (selected >= 0) {
578 /* switch to the selected descriptor */
579 if (selected != normal) {
580 /* set the selected alternate as in-use */
581 hmaster->inuse_map |=
582 (1 << (selected - UV_CPUS_PER_AS));
583 if (selected > stat->s_uv2_wars_hw)
584 stat->s_uv2_wars_hw = selected;
586 bau_desc_old = bcp->descriptor_base;
587 bau_desc_old += (ITEMS_PER_DESC * busy_one);
588 bcp->using_desc = selected;
589 bau_desc_new = bcp->descriptor_base;
590 bau_desc_new += (ITEMS_PER_DESC * selected);
591 *bau_desc_new = *bau_desc_old;
592 } else {
594 * All are busy. Wait for the normal one for this cpu to
595 * free up.
597 stat->s_uv2_war_waits++;
598 spin_unlock(&hmaster->uvhub_lock);
599 ttm = get_cycles();
600 do {
601 cpu_relax();
602 } while (normal_busy(bcp));
603 spin_lock(&hmaster->uvhub_lock);
604 /* switch to the original descriptor */
605 bcp->using_desc = normal;
606 bau_desc_old = bcp->descriptor_base;
607 bau_desc_old += (ITEMS_PER_DESC * busy_one);
609 bau_desc_new = bcp->descriptor_base;
610 bau_desc_new += (ITEMS_PER_DESC * normal);
611 *bau_desc_new = *bau_desc_old; /* copy the entire descriptor */
613 spin_unlock(&hmaster->uvhub_lock);
614 return FLUSH_RETRY_BUSYBUG;
617 static int uv2_wait_completion(struct bau_desc *bau_desc,
618 unsigned long mmr_offset, int right_shift,
619 struct bau_control *bcp, long try)
621 unsigned long descriptor_stat;
622 cycles_t ttm;
623 int desc = bcp->using_desc;
624 long busy_reps = 0;
625 struct ptc_stats *stat = bcp->statp;
627 descriptor_stat = uv2_read_status(mmr_offset, right_shift, desc);
629 /* spin on the status MMR, waiting for it to go idle */
630 while (descriptor_stat != UV2H_DESC_IDLE) {
632 * Our software ack messages may be blocked because
633 * there are no swack resources available. As long
634 * as none of them has timed out hardware will NACK
635 * our message and its state will stay IDLE.
637 if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) ||
638 (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) ||
639 (descriptor_stat == UV2H_DESC_DEST_PUT_ERR)) {
640 stat->s_stimeout++;
641 return FLUSH_GIVEUP;
642 } else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
643 stat->s_dtimeout++;
644 ttm = get_cycles();
646 * Our retries may be blocked by all destination
647 * swack resources being consumed, and a timeout
648 * pending. In that case hardware returns the
649 * ERROR that looks like a destination timeout.
651 if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
652 bcp->conseccompletes = 0;
653 return FLUSH_RETRY_PLUGGED;
655 bcp->conseccompletes = 0;
656 return FLUSH_RETRY_TIMEOUT;
657 } else {
658 busy_reps++;
659 if (busy_reps > 1000000) {
660 /* not to hammer on the clock */
661 busy_reps = 0;
662 ttm = get_cycles();
663 if ((ttm - bcp->send_message) >
664 (bcp->clocks_per_100_usec)) {
665 return handle_uv2_busy(bcp);
669 * descriptor_stat is still BUSY
671 cpu_relax();
673 descriptor_stat = uv2_read_status(mmr_offset, right_shift,
674 desc);
676 bcp->conseccompletes++;
677 return FLUSH_COMPLETE;
681 * There are 2 status registers; each an array[32] of 2 bits. Set up for
682 * which register to read and position in that register based on cpu in
683 * current hub.
685 static int wait_completion(struct bau_desc *bau_desc,
686 struct bau_control *bcp, long try)
688 int right_shift;
689 unsigned long mmr_offset;
690 int desc = bcp->using_desc;
692 if (desc < UV_CPUS_PER_AS) {
693 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
694 right_shift = desc * UV_ACT_STATUS_SIZE;
695 } else {
696 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
697 right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
700 if (bcp->uvhub_version == 1)
701 return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
702 bcp, try);
703 else
704 return uv2_wait_completion(bau_desc, mmr_offset, right_shift,
705 bcp, try);
708 static inline cycles_t sec_2_cycles(unsigned long sec)
710 unsigned long ns;
711 cycles_t cyc;
713 ns = sec * 1000000000;
714 cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
715 return cyc;
719 * Our retries are blocked by all destination sw ack resources being
720 * in use, and a timeout is pending. In that case hardware immediately
721 * returns the ERROR that looks like a destination timeout.
723 static void destination_plugged(struct bau_desc *bau_desc,
724 struct bau_control *bcp,
725 struct bau_control *hmaster, struct ptc_stats *stat)
727 udelay(bcp->plugged_delay);
728 bcp->plugged_tries++;
730 if (bcp->plugged_tries >= bcp->plugsb4reset) {
731 bcp->plugged_tries = 0;
733 quiesce_local_uvhub(hmaster);
735 spin_lock(&hmaster->queue_lock);
736 reset_with_ipi(&bau_desc->distribution, bcp);
737 spin_unlock(&hmaster->queue_lock);
739 end_uvhub_quiesce(hmaster);
741 bcp->ipi_attempts++;
742 stat->s_resets_plug++;
746 static void destination_timeout(struct bau_desc *bau_desc,
747 struct bau_control *bcp, struct bau_control *hmaster,
748 struct ptc_stats *stat)
750 hmaster->max_concurr = 1;
751 bcp->timeout_tries++;
752 if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
753 bcp->timeout_tries = 0;
755 quiesce_local_uvhub(hmaster);
757 spin_lock(&hmaster->queue_lock);
758 reset_with_ipi(&bau_desc->distribution, bcp);
759 spin_unlock(&hmaster->queue_lock);
761 end_uvhub_quiesce(hmaster);
763 bcp->ipi_attempts++;
764 stat->s_resets_timeout++;
769 * Completions are taking a very long time due to a congested numalink
770 * network.
772 static void disable_for_congestion(struct bau_control *bcp,
773 struct ptc_stats *stat)
775 /* let only one cpu do this disabling */
776 spin_lock(&disable_lock);
778 if (!baudisabled && bcp->period_requests &&
779 ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
780 int tcpu;
781 struct bau_control *tbcp;
782 /* it becomes this cpu's job to turn on the use of the
783 BAU again */
784 baudisabled = 1;
785 bcp->set_bau_off = 1;
786 bcp->set_bau_on_time = get_cycles();
787 bcp->set_bau_on_time += sec_2_cycles(bcp->cong_period);
788 stat->s_bau_disabled++;
789 for_each_present_cpu(tcpu) {
790 tbcp = &per_cpu(bau_control, tcpu);
791 tbcp->baudisabled = 1;
795 spin_unlock(&disable_lock);
798 static void count_max_concurr(int stat, struct bau_control *bcp,
799 struct bau_control *hmaster)
801 bcp->plugged_tries = 0;
802 bcp->timeout_tries = 0;
803 if (stat != FLUSH_COMPLETE)
804 return;
805 if (bcp->conseccompletes <= bcp->complete_threshold)
806 return;
807 if (hmaster->max_concurr >= hmaster->max_concurr_const)
808 return;
809 hmaster->max_concurr++;
812 static void record_send_stats(cycles_t time1, cycles_t time2,
813 struct bau_control *bcp, struct ptc_stats *stat,
814 int completion_status, int try)
816 cycles_t elapsed;
818 if (time2 > time1) {
819 elapsed = time2 - time1;
820 stat->s_time += elapsed;
822 if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
823 bcp->period_requests++;
824 bcp->period_time += elapsed;
825 if ((elapsed > congested_cycles) &&
826 (bcp->period_requests > bcp->cong_reps))
827 disable_for_congestion(bcp, stat);
829 } else
830 stat->s_requestor--;
832 if (completion_status == FLUSH_COMPLETE && try > 1)
833 stat->s_retriesok++;
834 else if (completion_status == FLUSH_GIVEUP)
835 stat->s_giveup++;
839 * Because of a uv1 hardware bug only a limited number of concurrent
840 * requests can be made.
842 static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
844 spinlock_t *lock = &hmaster->uvhub_lock;
845 atomic_t *v;
847 v = &hmaster->active_descriptor_count;
848 if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
849 stat->s_throttles++;
850 do {
851 cpu_relax();
852 } while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
857 * Handle the completion status of a message send.
859 static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
860 struct bau_control *bcp, struct bau_control *hmaster,
861 struct ptc_stats *stat)
863 if (completion_status == FLUSH_RETRY_PLUGGED)
864 destination_plugged(bau_desc, bcp, hmaster, stat);
865 else if (completion_status == FLUSH_RETRY_TIMEOUT)
866 destination_timeout(bau_desc, bcp, hmaster, stat);
870 * Send a broadcast and wait for it to complete.
872 * The flush_mask contains the cpus the broadcast is to be sent to including
873 * cpus that are on the local uvhub.
875 * Returns 0 if all flushing represented in the mask was done.
876 * Returns 1 if it gives up entirely and the original cpu mask is to be
877 * returned to the kernel.
879 int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
881 int seq_number = 0;
882 int completion_stat = 0;
883 int uv1 = 0;
884 long try = 0;
885 unsigned long index;
886 cycles_t time1;
887 cycles_t time2;
888 struct ptc_stats *stat = bcp->statp;
889 struct bau_control *hmaster = bcp->uvhub_master;
890 struct uv1_bau_msg_header *uv1_hdr = NULL;
891 struct uv2_bau_msg_header *uv2_hdr = NULL;
892 struct bau_desc *bau_desc;
894 if (bcp->uvhub_version == 1)
895 uv1_throttle(hmaster, stat);
897 while (hmaster->uvhub_quiesce)
898 cpu_relax();
900 time1 = get_cycles();
901 do {
902 bau_desc = bcp->descriptor_base;
903 bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
904 if (bcp->uvhub_version == 1) {
905 uv1 = 1;
906 uv1_hdr = &bau_desc->header.uv1_hdr;
907 } else
908 uv2_hdr = &bau_desc->header.uv2_hdr;
909 if ((try == 0) || (completion_stat == FLUSH_RETRY_BUSYBUG)) {
910 if (uv1)
911 uv1_hdr->msg_type = MSG_REGULAR;
912 else
913 uv2_hdr->msg_type = MSG_REGULAR;
914 seq_number = bcp->message_number++;
915 } else {
916 if (uv1)
917 uv1_hdr->msg_type = MSG_RETRY;
918 else
919 uv2_hdr->msg_type = MSG_RETRY;
920 stat->s_retry_messages++;
923 if (uv1)
924 uv1_hdr->sequence = seq_number;
925 else
926 uv2_hdr->sequence = seq_number;
927 index = (1UL << AS_PUSH_SHIFT) | bcp->using_desc;
928 bcp->send_message = get_cycles();
930 write_mmr_activation(index);
932 try++;
933 completion_stat = wait_completion(bau_desc, bcp, try);
934 /* UV2: wait_completion() may change the bcp->using_desc */
936 handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
938 if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
939 bcp->ipi_attempts = 0;
940 completion_stat = FLUSH_GIVEUP;
941 break;
943 cpu_relax();
944 } while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
945 (completion_stat == FLUSH_RETRY_BUSYBUG) ||
946 (completion_stat == FLUSH_RETRY_TIMEOUT));
948 time2 = get_cycles();
950 count_max_concurr(completion_stat, bcp, hmaster);
952 while (hmaster->uvhub_quiesce)
953 cpu_relax();
955 atomic_dec(&hmaster->active_descriptor_count);
957 record_send_stats(time1, time2, bcp, stat, completion_stat, try);
959 if (completion_stat == FLUSH_GIVEUP)
960 /* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
961 return 1;
962 return 0;
966 * The BAU is disabled. When the disabled time period has expired, the cpu
967 * that disabled it must re-enable it.
968 * Return 0 if it is re-enabled for all cpus.
970 static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
972 int tcpu;
973 struct bau_control *tbcp;
975 if (bcp->set_bau_off) {
976 if (get_cycles() >= bcp->set_bau_on_time) {
977 stat->s_bau_reenabled++;
978 baudisabled = 0;
979 for_each_present_cpu(tcpu) {
980 tbcp = &per_cpu(bau_control, tcpu);
981 tbcp->baudisabled = 0;
982 tbcp->period_requests = 0;
983 tbcp->period_time = 0;
985 return 0;
988 return -1;
991 static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
992 int remotes, struct bau_desc *bau_desc)
994 stat->s_requestor++;
995 stat->s_ntargcpu += remotes + locals;
996 stat->s_ntargremotes += remotes;
997 stat->s_ntarglocals += locals;
999 /* uvhub statistics */
1000 hubs = bau_uvhub_weight(&bau_desc->distribution);
1001 if (locals) {
1002 stat->s_ntarglocaluvhub++;
1003 stat->s_ntargremoteuvhub += (hubs - 1);
1004 } else
1005 stat->s_ntargremoteuvhub += hubs;
1007 stat->s_ntarguvhub += hubs;
1009 if (hubs >= 16)
1010 stat->s_ntarguvhub16++;
1011 else if (hubs >= 8)
1012 stat->s_ntarguvhub8++;
1013 else if (hubs >= 4)
1014 stat->s_ntarguvhub4++;
1015 else if (hubs >= 2)
1016 stat->s_ntarguvhub2++;
1017 else
1018 stat->s_ntarguvhub1++;
1022 * Translate a cpu mask to the uvhub distribution mask in the BAU
1023 * activation descriptor.
1025 static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
1026 struct bau_desc *bau_desc, int *localsp, int *remotesp)
1028 int cpu;
1029 int pnode;
1030 int cnt = 0;
1031 struct hub_and_pnode *hpp;
1033 for_each_cpu(cpu, flush_mask) {
1035 * The distribution vector is a bit map of pnodes, relative
1036 * to the partition base pnode (and the partition base nasid
1037 * in the header).
1038 * Translate cpu to pnode and hub using a local memory array.
1040 hpp = &bcp->socket_master->thp[cpu];
1041 pnode = hpp->pnode - bcp->partition_base_pnode;
1042 bau_uvhub_set(pnode, &bau_desc->distribution);
1043 cnt++;
1044 if (hpp->uvhub == bcp->uvhub)
1045 (*localsp)++;
1046 else
1047 (*remotesp)++;
1049 if (!cnt)
1050 return 1;
1051 return 0;
1055 * globally purge translation cache of a virtual address or all TLB's
1056 * @cpumask: mask of all cpu's in which the address is to be removed
1057 * @mm: mm_struct containing virtual address range
1058 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
1059 * @cpu: the current cpu
1061 * This is the entry point for initiating any UV global TLB shootdown.
1063 * Purges the translation caches of all specified processors of the given
1064 * virtual address, or purges all TLB's on specified processors.
1066 * The caller has derived the cpumask from the mm_struct. This function
1067 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
1069 * The cpumask is converted into a uvhubmask of the uvhubs containing
1070 * those cpus.
1072 * Note that this function should be called with preemption disabled.
1074 * Returns NULL if all remote flushing was done.
1075 * Returns pointer to cpumask if some remote flushing remains to be
1076 * done. The returned pointer is valid till preemption is re-enabled.
1078 const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
1079 struct mm_struct *mm, unsigned long va,
1080 unsigned int cpu)
1082 int locals = 0;
1083 int remotes = 0;
1084 int hubs = 0;
1085 struct bau_desc *bau_desc;
1086 struct cpumask *flush_mask;
1087 struct ptc_stats *stat;
1088 struct bau_control *bcp;
1090 /* kernel was booted 'nobau' */
1091 if (nobau)
1092 return cpumask;
1094 bcp = &per_cpu(bau_control, cpu);
1095 stat = bcp->statp;
1097 /* bau was disabled due to slow response */
1098 if (bcp->baudisabled) {
1099 if (check_enable(bcp, stat))
1100 return cpumask;
1104 * Each sending cpu has a per-cpu mask which it fills from the caller's
1105 * cpu mask. All cpus are converted to uvhubs and copied to the
1106 * activation descriptor.
1108 flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
1109 /* don't actually do a shootdown of the local cpu */
1110 cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
1112 if (cpu_isset(cpu, *cpumask))
1113 stat->s_ntargself++;
1115 bau_desc = bcp->descriptor_base;
1116 bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
1117 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
1118 if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
1119 return NULL;
1121 record_send_statistics(stat, locals, hubs, remotes, bau_desc);
1123 bau_desc->payload.address = va;
1124 bau_desc->payload.sending_cpu = cpu;
1126 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
1127 * or 1 if it gave up and the original cpumask should be returned.
1129 if (!uv_flush_send_and_wait(flush_mask, bcp))
1130 return NULL;
1131 else
1132 return cpumask;
1136 * Search the message queue for any 'other' message with the same software
1137 * acknowledge resource bit vector.
1139 struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
1140 struct bau_control *bcp, unsigned char swack_vec)
1142 struct bau_pq_entry *msg_next = msg + 1;
1144 if (msg_next > bcp->queue_last)
1145 msg_next = bcp->queue_first;
1146 while ((msg_next->swack_vec != 0) && (msg_next != msg)) {
1147 if (msg_next->swack_vec == swack_vec)
1148 return msg_next;
1149 msg_next++;
1150 if (msg_next > bcp->queue_last)
1151 msg_next = bcp->queue_first;
1153 return NULL;
1157 * UV2 needs to work around a bug in which an arriving message has not
1158 * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
1159 * Such a message must be ignored.
1161 void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
1163 unsigned long mmr_image;
1164 unsigned char swack_vec;
1165 struct bau_pq_entry *msg = mdp->msg;
1166 struct bau_pq_entry *other_msg;
1168 mmr_image = read_mmr_sw_ack();
1169 swack_vec = msg->swack_vec;
1171 if ((swack_vec & mmr_image) == 0) {
1173 * This message was assigned a swack resource, but no
1174 * reserved acknowledgment is pending.
1175 * The bug has prevented this message from setting the MMR.
1176 * And no other message has used the same sw_ack resource.
1177 * Do the requested shootdown but do not reply to the msg.
1178 * (the 0 means make no acknowledge)
1180 bau_process_message(mdp, bcp, 0);
1181 return;
1185 * Some message has set the MMR 'pending' bit; it might have been
1186 * another message. Look for that message.
1188 other_msg = find_another_by_swack(msg, bcp, msg->swack_vec);
1189 if (other_msg) {
1190 /* There is another. Do not ack the current one. */
1191 bau_process_message(mdp, bcp, 0);
1193 * Let the natural processing of that message acknowledge
1194 * it. Don't get the processing of sw_ack's out of order.
1196 return;
1200 * There is no other message using this sw_ack, so it is safe to
1201 * acknowledge it.
1203 bau_process_message(mdp, bcp, 1);
1205 return;
1209 * The BAU message interrupt comes here. (registered by set_intr_gate)
1210 * See entry_64.S
1212 * We received a broadcast assist message.
1214 * Interrupts are disabled; this interrupt could represent
1215 * the receipt of several messages.
1217 * All cores/threads on this hub get this interrupt.
1218 * The last one to see it does the software ack.
1219 * (the resource will not be freed until noninterruptible cpus see this
1220 * interrupt; hardware may timeout the s/w ack and reply ERROR)
1222 void uv_bau_message_interrupt(struct pt_regs *regs)
1224 int count = 0;
1225 cycles_t time_start;
1226 struct bau_pq_entry *msg;
1227 struct bau_control *bcp;
1228 struct ptc_stats *stat;
1229 struct msg_desc msgdesc;
1231 time_start = get_cycles();
1233 bcp = &per_cpu(bau_control, smp_processor_id());
1234 stat = bcp->statp;
1236 msgdesc.queue_first = bcp->queue_first;
1237 msgdesc.queue_last = bcp->queue_last;
1239 msg = bcp->bau_msg_head;
1240 while (msg->swack_vec) {
1241 count++;
1243 msgdesc.msg_slot = msg - msgdesc.queue_first;
1244 msgdesc.msg = msg;
1245 if (bcp->uvhub_version == 2)
1246 process_uv2_message(&msgdesc, bcp);
1247 else
1248 bau_process_message(&msgdesc, bcp, 1);
1250 msg++;
1251 if (msg > msgdesc.queue_last)
1252 msg = msgdesc.queue_first;
1253 bcp->bau_msg_head = msg;
1255 stat->d_time += (get_cycles() - time_start);
1256 if (!count)
1257 stat->d_nomsg++;
1258 else if (count > 1)
1259 stat->d_multmsg++;
1261 ack_APIC_irq();
1265 * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
1266 * shootdown message timeouts enabled. The timeout does not cause
1267 * an interrupt, but causes an error message to be returned to
1268 * the sender.
1270 static void __init enable_timeouts(void)
1272 int uvhub;
1273 int nuvhubs;
1274 int pnode;
1275 unsigned long mmr_image;
1277 nuvhubs = uv_num_possible_blades();
1279 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
1280 if (!uv_blade_nr_possible_cpus(uvhub))
1281 continue;
1283 pnode = uv_blade_to_pnode(uvhub);
1284 mmr_image = read_mmr_misc_control(pnode);
1286 * Set the timeout period and then lock it in, in three
1287 * steps; captures and locks in the period.
1289 * To program the period, the SOFT_ACK_MODE must be off.
1291 mmr_image &= ~(1L << SOFTACK_MSHIFT);
1292 write_mmr_misc_control(pnode, mmr_image);
1294 * Set the 4-bit period.
1296 mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
1297 mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
1298 write_mmr_misc_control(pnode, mmr_image);
1300 * UV1:
1301 * Subsequent reversals of the timebase bit (3) cause an
1302 * immediate timeout of one or all INTD resources as
1303 * indicated in bits 2:0 (7 causes all of them to timeout).
1305 mmr_image |= (1L << SOFTACK_MSHIFT);
1306 if (is_uv2_hub()) {
1307 mmr_image &= ~(1L << UV2_LEG_SHFT);
1308 mmr_image |= (1L << UV2_EXT_SHFT);
1310 write_mmr_misc_control(pnode, mmr_image);
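/*
 * seq_file iteration for /proc/sgi_uv/ptc_statistics: the offset walks
 * the possible cpus, one statistics line per cpu.
 */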
1314 static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
1316 if (*offset < num_possible_cpus())
1317 return offset;
1318 return NULL;
1321 static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
1323 (*offset)++;
1324 if (*offset < num_possible_cpus())
1325 return offset;
1326 return NULL;
1329 static void ptc_seq_stop(struct seq_file *file, void *data)
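/*
 * Convert a microsecond count to cycles using this cpu's cyc2ns factor.
 */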
1333 static inline unsigned long long usec_2_cycles(unsigned long microsec)
1335 unsigned long ns;
1336 unsigned long long cyc;
1338 ns = microsec * 1000;
1339 cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
1340 return cyc;
1344 * Display the statistics thru /proc/sgi_uv/ptc_statistics
1345 * 'data' points to the cpu number
1346 * Note: see the descriptions in stat_description[].
1348 static int ptc_seq_show(struct seq_file *file, void *data)
1350 struct ptc_stats *stat;
1351 int cpu;
1353 cpu = *(loff_t *)data;
1354 if (!cpu) {
1355 seq_printf(file,
1356 "# cpu sent stime self locals remotes ncpus localhub ");
1357 seq_printf(file,
1358 "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
1359 seq_printf(file,
1360 "numuvhubs4 numuvhubs2 numuvhubs1 dto retries rok ");
1361 seq_printf(file,
1362 "resetp resett giveup sto bz throt swack recv rtime ");
1363 seq_printf(file,
1364 "all one mult none retry canc nocan reset rcan ");
1365 seq_printf(file,
1366 "disable enable wars warshw warwaits\n");
1368 if (cpu < num_possible_cpus() && cpu_online(cpu)) {
1369 stat = &per_cpu(ptcstats, cpu);
1370 /* source side statistics */
1371 seq_printf(file,
1372 "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
1373 cpu, stat->s_requestor, cycles_2_us(stat->s_time),
1374 stat->s_ntargself, stat->s_ntarglocals,
1375 stat->s_ntargremotes, stat->s_ntargcpu,
1376 stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
1377 stat->s_ntarguvhub, stat->s_ntarguvhub16);
1378 seq_printf(file, "%ld %ld %ld %ld %ld ",
1379 stat->s_ntarguvhub8, stat->s_ntarguvhub4,
1380 stat->s_ntarguvhub2, stat->s_ntarguvhub1,
1381 stat->s_dtimeout);
1382 seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
1383 stat->s_retry_messages, stat->s_retriesok,
1384 stat->s_resets_plug, stat->s_resets_timeout,
1385 stat->s_giveup, stat->s_stimeout,
1386 stat->s_busy, stat->s_throttles);
1388 /* destination side statistics */
1389 seq_printf(file,
1390 "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
1391 read_gmmr_sw_ack(uv_cpu_to_pnode(cpu)),
1392 stat->d_requestee, cycles_2_us(stat->d_time),
1393 stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
1394 stat->d_nomsg, stat->d_retries, stat->d_canceled,
1395 stat->d_nocanceled, stat->d_resets,
1396 stat->d_rcanceled);
1397 seq_printf(file, "%ld %ld %ld %ld %ld\n",
1398 stat->s_bau_disabled, stat->s_bau_reenabled,
1399 stat->s_uv2_wars, stat->s_uv2_wars_hw,
1400 stat->s_uv2_war_waits);
1402 return 0;
1406 * Display the tunables thru debugfs
1408 static ssize_t tunables_read(struct file *file, char __user *userbuf,
1409 size_t count, loff_t *ppos)
1411 char *buf;
1412 int ret;
1414 buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
1415 "max_concur plugged_delay plugsb4reset",
1416 "timeoutsb4reset ipi_reset_limit complete_threshold",
1417 "congested_response_us congested_reps congested_period",
1418 max_concurr, plugged_delay, plugsb4reset,
1419 timeoutsb4reset, ipi_reset_limit, complete_threshold,
1420 congested_respns_us, congested_reps, congested_period);
1422 if (!buf)
1423 return -ENOMEM;
1425 ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
1426 kfree(buf);
1427 return ret;
1431 * handle a write to /proc/sgi_uv/ptc_statistics
1432 * -1: reset the statistics
1433 * 0: display meaning of the statistics
1435 static ssize_t ptc_proc_write(struct file *file, const char __user *user,
1436 size_t count, loff_t *data)
1438 int cpu;
1439 int i;
1440 int elements;
1441 long input_arg;
1442 char optstr[64];
1443 struct ptc_stats *stat;
1445 if (count == 0 || count > sizeof(optstr))
1446 return -EINVAL;
1447 if (copy_from_user(optstr, user, count))
1448 return -EFAULT;
1449 optstr[count - 1] = '\0';
1451 if (strict_strtol(optstr, 10, &input_arg) < 0) {
1452 printk(KERN_DEBUG "%s is invalid\n", optstr);
1453 return -EINVAL;
1456 if (input_arg == 0) {
1457 elements = sizeof(stat_description)/sizeof(*stat_description);
1458 printk(KERN_DEBUG "# cpu: cpu number\n");
1459 printk(KERN_DEBUG "Sender statistics:\n");
1460 for (i = 0; i < elements; i++)
1461 printk(KERN_DEBUG "%s\n", stat_description[i]);
1462 } else if (input_arg == -1) {
1463 for_each_present_cpu(cpu) {
1464 stat = &per_cpu(ptcstats, cpu);
1465 memset(stat, 0, sizeof(struct ptc_stats));
1469 return count;
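/*
 * Minimal decimal string-to-integer conversion; stops at the first
 * non-digit character. Used by parse_tunables_write().
 */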
1472 static int local_atoi(const char *name)
1474 int val = 0;
1476 for (;; name++) {
1477 switch (*name) {
1478 case '0' ... '9':
1479 val = 10*val+(*name-'0');
1480 break;
1481 default:
1482 return val;
1488 * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
1489 * Zero values reset them to defaults.
1491 static int parse_tunables_write(struct bau_control *bcp, char *instr,
1492 int count)
1494 char *p;
1495 char *q;
1496 int cnt = 0;
1497 int val;
1498 int e = sizeof(tunables) / sizeof(*tunables);
1500 p = instr + strspn(instr, WHITESPACE);
1501 q = p;
1502 for (; *p; p = q + strspn(q, WHITESPACE)) {
1503 q = p + strcspn(p, WHITESPACE);
1504 cnt++;
1505 if (q == p)
1506 break;
1508 if (cnt != e) {
1509 printk(KERN_INFO "bau tunable error: should be %d values\n", e);
1510 return -EINVAL;
1513 p = instr + strspn(instr, WHITESPACE);
1514 q = p;
1515 for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
1516 q = p + strcspn(p, WHITESPACE);
1517 val = local_atoi(p);
1518 switch (cnt) {
1519 case 0:
1520 if (val == 0) {
1521 max_concurr = MAX_BAU_CONCURRENT;
1522 max_concurr_const = MAX_BAU_CONCURRENT;
1523 continue;
1525 if (val < 1 || val > bcp->cpus_in_uvhub) {
1526 printk(KERN_DEBUG
1527 "Error: BAU max concurrent %d is invalid\n",
1528 val);
1529 return -EINVAL;
1531 max_concurr = val;
1532 max_concurr_const = val;
1533 continue;
1534 default:
1535 if (val == 0)
1536 *tunables[cnt].tunp = tunables[cnt].deflt;
1537 else
1538 *tunables[cnt].tunp = val;
1539 continue;
1541 if (q == p)
1542 break;
1544 return 0;
1548 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
1550 static ssize_t tunables_write(struct file *file, const char __user *user,
1551 size_t count, loff_t *data)
1553 int cpu;
1554 int ret;
1555 char instr[100];
1556 struct bau_control *bcp;
1558 if (count == 0 || count > sizeof(instr)-1)
1559 return -EINVAL;
1560 if (copy_from_user(instr, user, count))
1561 return -EFAULT;
1563 instr[count] = '\0';
1565 cpu = get_cpu();
1566 bcp = &per_cpu(bau_control, cpu);
1567 ret = parse_tunables_write(bcp, instr, count);
1568 put_cpu();
1569 if (ret)
1570 return ret;
1572 for_each_present_cpu(cpu) {
1573 bcp = &per_cpu(bau_control, cpu);
1574 bcp->max_concurr = max_concurr;
1575 bcp->max_concurr_const = max_concurr;
1576 bcp->plugged_delay = plugged_delay;
1577 bcp->plugsb4reset = plugsb4reset;
1578 bcp->timeoutsb4reset = timeoutsb4reset;
1579 bcp->ipi_reset_limit = ipi_reset_limit;
1580 bcp->complete_threshold = complete_threshold;
1581 bcp->cong_response_us = congested_respns_us;
1582 bcp->cong_reps = congested_reps;
1583 bcp->cong_period = congested_period;
1585 return count;
1588 static const struct seq_operations uv_ptc_seq_ops = {
1589 .start = ptc_seq_start,
1590 .next = ptc_seq_next,
1591 .stop = ptc_seq_stop,
1592 .show = ptc_seq_show
1595 static int ptc_proc_open(struct inode *inode, struct file *file)
1597 return seq_open(file, &uv_ptc_seq_ops);
1600 static int tunables_open(struct inode *inode, struct file *file)
1602 return 0;
1605 static const struct file_operations proc_uv_ptc_operations = {
1606 .open = ptc_proc_open,
1607 .read = seq_read,
1608 .write = ptc_proc_write,
1609 .llseek = seq_lseek,
1610 .release = seq_release,
1613 static const struct file_operations tunables_fops = {
1614 .open = tunables_open,
1615 .read = tunables_read,
1616 .write = tunables_write,
1617 .llseek = default_llseek,
1620 static int __init uv_ptc_init(void)
1622 struct proc_dir_entry *proc_uv_ptc;
1624 if (!is_uv_system())
1625 return 0;
1627 proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
1628 &proc_uv_ptc_operations);
1629 if (!proc_uv_ptc) {
1630 printk(KERN_ERR "unable to create %s proc entry\n",
1631 UV_PTC_BASENAME);
1632 return -EINVAL;
1635 tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
1636 if (!tunables_dir) {
1637 printk(KERN_ERR "unable to create debugfs directory %s\n",
1638 UV_BAU_TUNABLES_DIR);
1639 return -EINVAL;
1641 tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
1642 tunables_dir, NULL, &tunables_fops);
1643 if (!tunables_file) {
1644 printk(KERN_ERR "unable to create debugfs file %s\n",
1645 UV_BAU_TUNABLES_FILE);
1646 return -EINVAL;
1648 return 0;
1652 * Initialize the sending side's sending buffers.
1654 static void activation_descriptor_init(int node, int pnode, int base_pnode)
1656 int i;
1657 int cpu;
1658 int uv1 = 0;
1659 unsigned long gpa;
1660 unsigned long m;
1661 unsigned long n;
1662 size_t dsize;
1663 struct bau_desc *bau_desc;
1664 struct bau_desc *bd2;
1665 struct uv1_bau_msg_header *uv1_hdr;
1666 struct uv2_bau_msg_header *uv2_hdr;
1667 struct bau_control *bcp;
1670 * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC)
1671 * per cpu; and one per cpu on the uvhub (ADP_SZ)
1673 dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
1674 bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
1675 BUG_ON(!bau_desc);
1677 gpa = uv_gpa(bau_desc);
1678 n = uv_gpa_to_gnode(gpa);
1679 m = uv_gpa_to_offset(gpa);
1680 if (is_uv1_hub())
1681 uv1 = 1;
1683 /* the 14-bit pnode */
1684 write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
1686 * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
1687 * cpu even though we only use the first one; one descriptor can
1688 * describe a broadcast to 256 uv hubs.
1690 for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
1691 memset(bd2, 0, sizeof(struct bau_desc));
1692 if (uv1) {
1693 uv1_hdr = &bd2->header.uv1_hdr;
1694 uv1_hdr->swack_flag = 1;
1696 * The base_dest_nasid set in the message header
1697 * is the nasid of the first uvhub in the partition.
1698 * The bit map will indicate destination pnode numbers
1699 * relative to that base. They may not be consecutive
1700 * if nasid striding is being used.
1702 uv1_hdr->base_dest_nasid =
1703 UV_PNODE_TO_NASID(base_pnode);
1704 uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
1705 uv1_hdr->command = UV_NET_ENDPOINT_INTD;
1706 uv1_hdr->int_both = 1;
1708 * all others need to be set to zero:
1709 * fairness chaining multilevel count replied_to
1711 } else {
1712 uv2_hdr = &bd2->header.uv2_hdr;
1713 uv2_hdr->swack_flag = 1;
1714 uv2_hdr->base_dest_nasid =
1715 UV_PNODE_TO_NASID(base_pnode);
1716 uv2_hdr->dest_subnodeid = UV_LB_SUBNODEID;
1717 uv2_hdr->command = UV_NET_ENDPOINT_INTD;
1720 for_each_present_cpu(cpu) {
1721 if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
1722 continue;
1723 bcp = &per_cpu(bau_control, cpu);
1724 bcp->descriptor_base = bau_desc;
1729 * initialize the destination side's receiving buffers
1730 * entered for each uvhub in the partition
1731 * - node is first node (kernel memory notion) on the uvhub
1732 * - pnode is the uvhub's physical identifier
1734 static void pq_init(int node, int pnode)
1736 int cpu;
1737 size_t plsize;
1738 char *cp;
1739 void *vp;
1740 unsigned long pn;
1741 unsigned long first;
1742 unsigned long pn_first;
1743 unsigned long last;
1744 struct bau_pq_entry *pqp;
1745 struct bau_control *bcp;
1747 plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
1748 vp = kmalloc_node(plsize, GFP_KERNEL, node);
1749 pqp = (struct bau_pq_entry *)vp;
1750 BUG_ON(!pqp);
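	/* round the payload queue start up to a 32-byte boundary */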
1752 cp = (char *)pqp + 31;
1753 pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);
1755 for_each_present_cpu(cpu) {
1756 if (pnode != uv_cpu_to_pnode(cpu))
1757 continue;
1758 /* for every cpu on this pnode: */
1759 bcp = &per_cpu(bau_control, cpu);
1760 bcp->queue_first = pqp;
1761 bcp->bau_msg_head = pqp;
1762 bcp->queue_last = pqp + (DEST_Q_SIZE - 1);
1765 * need the gnode of where the memory was really allocated
1767 pn = uv_gpa_to_gnode(uv_gpa(pqp));
1768 first = uv_physnodeaddr(pqp);
1769 pn_first = ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | first;
1770 last = uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1));
1771 write_mmr_payload_first(pnode, pn_first);
1772 write_mmr_payload_tail(pnode, first);
1773 write_mmr_payload_last(pnode, last);
1774 write_gmmr_sw_ack(pnode, 0xffffUL);
1776 /* in effect, all msg_type's are set to MSG_NOOP */
1777 memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
1781 * Initialization of each UV hub's structures
1783 static void __init init_uvhub(int uvhub, int vector, int base_pnode)
1785 int node;
1786 int pnode;
1787 unsigned long apicid;
1789 node = uvhub_to_first_node(uvhub);
1790 pnode = uv_blade_to_pnode(uvhub);
1792 activation_descriptor_init(node, pnode, base_pnode);
1794 pq_init(node, pnode);
1796 * The below initialization can't be in firmware because the
1797 * messaging IRQ will be determined by the OS.
1799 apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
1800 write_mmr_data_config(pnode, ((apicid << 32) | vector));
1804 * We will set BAU_MISC_CONTROL with a timeout period.
1805 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
1806 * So the destination timeout period has to be calculated from them.
1808 static int calculate_destination_timeout(void)
1810 unsigned long mmr_image;
1811 int mult1;
1812 int mult2;
1813 int index;
1814 int base;
1815 int ret;
1816 unsigned long ts_ns;
1818 if (is_uv1_hub()) {
1819 mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
1820 mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
1821 index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
1822 mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
1823 mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
1824 base = timeout_base_ns[index];
1825 ts_ns = base * mult1 * mult2;
1826 ret = ts_ns / 1000;
1827 } else {
1828 /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
1829 mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
1830 mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
1831 if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
1832 base = 80;
1833 else
1834 base = 10;
1835 mult1 = mmr_image & UV2_ACK_MASK;
1836 ret = mult1 * base;
1838 return ret;
1841 static void __init init_per_cpu_tunables(void)
1843 int cpu;
1844 struct bau_control *bcp;
1846 for_each_present_cpu(cpu) {
1847 bcp = &per_cpu(bau_control, cpu);
1848 bcp->baudisabled = 0;
1849 bcp->statp = &per_cpu(ptcstats, cpu);
1850 /* time interval to catch a hardware stay-busy bug */
1851 bcp->timeout_interval = usec_2_cycles(2*timeout_us);
1852 bcp->max_concurr = max_concurr;
1853 bcp->max_concurr_const = max_concurr;
1854 bcp->plugged_delay = plugged_delay;
1855 bcp->plugsb4reset = plugsb4reset;
1856 bcp->timeoutsb4reset = timeoutsb4reset;
1857 bcp->ipi_reset_limit = ipi_reset_limit;
1858 bcp->complete_threshold = complete_threshold;
1859 bcp->cong_response_us = congested_respns_us;
1860 bcp->cong_reps = congested_reps;
1861 bcp->cong_period = congested_period;
1862 bcp->clocks_per_100_usec = usec_2_cycles(100);
1863 spin_lock_init(&bcp->queue_lock);
1864 spin_lock_init(&bcp->uvhub_lock);
1869 * Scan all cpus to collect blade and socket summaries.
1871 static int __init get_cpu_topology(int base_pnode,
1872 struct uvhub_desc *uvhub_descs,
1873 unsigned char *uvhub_mask)
1875 int cpu;
1876 int pnode;
1877 int uvhub;
1878 int socket;
1879 struct bau_control *bcp;
1880 struct uvhub_desc *bdp;
1881 struct socket_desc *sdp;
1883 for_each_present_cpu(cpu) {
1884 bcp = &per_cpu(bau_control, cpu);
1886 memset(bcp, 0, sizeof(struct bau_control));
1888 pnode = uv_cpu_hub_info(cpu)->pnode;
1889 if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
1890 printk(KERN_EMERG
1891 "cpu %d pnode %d-%d beyond %d; BAU disabled\n",
1892 cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
1893 return 1;
1896 bcp->osnode = cpu_to_node(cpu);
1897 bcp->partition_base_pnode = base_pnode;
1899 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
1900 *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
1901 bdp = &uvhub_descs[uvhub];
1903 bdp->num_cpus++;
1904 bdp->uvhub = uvhub;
1905 bdp->pnode = pnode;
1907 /* kludge: 'assuming' one node per socket, and assuming that
1908 disabling a socket just leaves a gap in node numbers */
1909 socket = bcp->osnode & 1;
1910 bdp->socket_mask |= (1 << socket);
1911 sdp = &bdp->socket[socket];
1912 sdp->cpu_number[sdp->num_cpus] = cpu;
1913 sdp->num_cpus++;
1914 if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
1915 printk(KERN_EMERG "%d cpus per socket invalid\n",
1916 sdp->num_cpus);
1917 return 1;
1920 return 0;
1924 * Each socket is to get a local array of pnodes/hubs.
1926 static void make_per_cpu_thp(struct bau_control *smaster)
1928 int cpu;
1929 size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();
1931 smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
1932 memset(smaster->thp, 0, hpsz);
1933 for_each_present_cpu(cpu) {
1934 smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
1935 smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
1940 * Each uvhub is to get a local cpumask.
1942 static void make_per_hub_cpumask(struct bau_control *hmaster)
1944 int sz = sizeof(cpumask_t);
1946 hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode);
1950 * Initialize all the per_cpu information for the cpu's on a given socket,
1951 * given what has been gathered into the socket_desc struct,
1952 * and report the chosen hub and socket masters back to the caller.
1954 static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
1955 struct bau_control **smasterp,
1956 struct bau_control **hmasterp)
1958 int i;
1959 int cpu;
1960 struct bau_control *bcp;
1962 for (i = 0; i < sdp->num_cpus; i++) {
1963 cpu = sdp->cpu_number[i];
1964 bcp = &per_cpu(bau_control, cpu);
1965 bcp->cpu = cpu;
1966 if (i == 0) {
1967 *smasterp = bcp;
1968 if (!(*hmasterp))
1969 *hmasterp = bcp;
1971 bcp->cpus_in_uvhub = bdp->num_cpus;
1972 bcp->cpus_in_socket = sdp->num_cpus;
1973 bcp->socket_master = *smasterp;
1974 bcp->uvhub = bdp->uvhub;
1975 if (is_uv1_hub())
1976 bcp->uvhub_version = 1;
1977 else if (is_uv2_hub())
1978 bcp->uvhub_version = 2;
1979 else {
1980 printk(KERN_EMERG "uvhub version not 1 or 2\n");
1981 return 1;
1983 bcp->uvhub_master = *hmasterp;
1984 bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
1985 bcp->using_desc = bcp->uvhub_cpu;
1986 if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
1987 printk(KERN_EMERG "%d cpus per uvhub invalid\n",
1988 bcp->uvhub_cpu);
1989 return 1;
1992 return 0;
1996 * Summarize the blade and socket topology into the per_cpu structures.
1998 static int __init summarize_uvhub_sockets(int nuvhubs,
1999 struct uvhub_desc *uvhub_descs,
2000 unsigned char *uvhub_mask)
2002 int socket;
2003 int uvhub;
2004 unsigned short socket_mask;
2006 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
2007 struct uvhub_desc *bdp;
2008 struct bau_control *smaster = NULL;
2009 struct bau_control *hmaster = NULL;
2011 if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
2012 continue;
2014 bdp = &uvhub_descs[uvhub];
2015 socket_mask = bdp->socket_mask;
2016 socket = 0;
2017 while (socket_mask) {
2018 struct socket_desc *sdp;
2019 if ((socket_mask & 1)) {
2020 sdp = &bdp->socket[socket];
2021 if (scan_sock(sdp, bdp, &smaster, &hmaster))
2022 return 1;
2023 make_per_cpu_thp(smaster);
2025 socket++;
2026 socket_mask = (socket_mask >> 1);
2028 make_per_hub_cpumask(hmaster);
2030 return 0;
2034 * initialize the bau_control structure for each cpu
2036 static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
2038 unsigned char *uvhub_mask;
2039 void *vp;
2040 struct uvhub_desc *uvhub_descs;
2042 timeout_us = calculate_destination_timeout();
2044 vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
2045 uvhub_descs = (struct uvhub_desc *)vp;
2046 memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
2047 uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
2049 if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
2050 goto fail;
2052 if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
2053 goto fail;
2055 kfree(uvhub_descs);
2056 kfree(uvhub_mask);
2057 init_per_cpu_tunables();
2058 return 0;
2060 fail:
2061 kfree(uvhub_descs);
2062 kfree(uvhub_mask);
2063 return 1;
2067 * Initialization of BAU-related structures
2069 static int __init uv_bau_init(void)
2071 int uvhub;
2072 int pnode;
2073 int nuvhubs;
2074 int cur_cpu;
2075 int cpus;
2076 int vector;
2077 cpumask_var_t *mask;
2079 if (!is_uv_system())
2080 return 0;
2082 if (nobau)
2083 return 0;
2085 for_each_possible_cpu(cur_cpu) {
2086 mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
2087 zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
2090 nuvhubs = uv_num_possible_blades();
2091 spin_lock_init(&disable_lock);
2092 congested_cycles = usec_2_cycles(congested_respns_us);
2094 uv_base_pnode = 0x7fffffff;
2095 for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
2096 cpus = uv_blade_nr_possible_cpus(uvhub);
2097 if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
2098 uv_base_pnode = uv_blade_to_pnode(uvhub);
2101 enable_timeouts();
2103 if (init_per_cpu(nuvhubs, uv_base_pnode)) {
2104 nobau = 1;
2105 return 0;
2108 vector = UV_BAU_MESSAGE;
2109 for_each_possible_blade(uvhub)
2110 if (uv_blade_nr_possible_cpus(uvhub))
2111 init_uvhub(uvhub, vector, uv_base_pnode);
2113 alloc_intr_gate(vector, uv_bau_message_intr1);
2115 for_each_possible_blade(uvhub) {
2116 if (uv_blade_nr_possible_cpus(uvhub)) {
2117 unsigned long val;
2118 unsigned long mmr;
2119 pnode = uv_blade_to_pnode(uvhub);
2120 /* INIT the bau */
2121 val = 1L << 63;
2122 write_gmmr_activation(pnode, val);
2123 mmr = 1; /* should be 1 to broadcast to both sockets */
2124 if (!is_uv1_hub())
2125 write_mmr_data_broadcast(pnode, mmr);
2129 return 0;
2131 core_initcall(uv_bau_init);
2132 fs_initcall(uv_ptc_init);