/*
 *	SGI UltraViolet TLB flush routines.
 *
 *	(c) 2008-2010 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/irq_vectors.h>
#include <asm/timer.h>
/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
static int timeout_base_ns[] = {
		20,
		160,
		1280,
		10240,
		81920,
		655360,
		5242880,
		167772160
};
static int timeout_us;
static int nobau;
static int baudisabled;
static spinlock_t disable_lock;
static cycles_t congested_cycles;
static int max_bau_concurrent = MAX_BAU_CONCURRENT;
static int max_bau_concurrent_constant = MAX_BAU_CONCURRENT;
static int plugged_delay = PLUGGED_DELAY;
static int plugsb4reset = PLUGSB4RESET;
static int timeoutsb4reset = TIMEOUTSB4RESET;
static int ipi_reset_limit = IPI_RESET_LIMIT;
static int complete_threshold = COMPLETE_THRESHOLD;
static int congested_response_us = CONGESTED_RESPONSE_US;
static int congested_reps = CONGESTED_REPS;
static int congested_period = CONGESTED_PERIOD;
static struct dentry *tunables_dir;
static struct dentry *tunables_file;
static int __init setup_nobau(char *arg)
{
	nobau = 1;
	return 0;
}
early_param("nobau", setup_nobau);
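/*
 * Note (summarizing behaviour found elsewhere in this file): booting with
 * "nobau" on the kernel command line sets nobau, and uv_flush_tlb_others()
 * then returns the caller's cpumask untouched, so TLB shootdowns fall back
 * to the ordinary IPI path instead of the BAU.
 */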
/* base pnode in this partition */
static int uv_partition_base_pnode __read_mostly;
/* position of pnode (which is nasid>>1): */
static int uv_nshift __read_mostly;
static unsigned long uv_mmask __read_mostly;
static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
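/*
 * Per-cpu state: ptcstats holds the statistics shown through /proc,
 * bau_control holds each cpu's sending/receiving state, and
 * uv_flush_tlb_mask is the scratch cpumask filled by uv_flush_tlb_others().
 */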
/*
 * Determine the first node on a uvhub. 'Nodes' are used for kernel
 * memory allocation.
 */
static int __init uvhub_to_first_node(int uvhub)
{
	int node, b;

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);
		if (uvhub == b)
			break;
	}
	return node;
}
/*
 * Determine the apicid of the first cpu on a uvhub.
 */
static int __init uvhub_to_first_apicid(int uvhub)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return per_cpu(x86_cpu_to_apicid, cpu);
	return -1;
}
/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static inline void uv_reply_to_message(struct msg_desc *mdp,
				       struct bau_control *bcp)
{
	unsigned long dw;
	struct bau_payload_queue_entry *msg;

	msg = mdp->msg;
	if (!msg->canceled) {
		dw = (msg->sw_ack_vector << UV_SW_ACK_NPENDING) |
						msg->sw_ack_vector;
		uv_write_local_mmr(
				UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
	}
	msg->replied_to = 1;
	msg->sw_ack_vector = 0;
}
/*
 * Process the receipt of a RETRY message
 */
static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
					    struct bau_control *bcp)
{
	int i;
	int slot2;
	int cancel_count = 0;
	unsigned long msg_res;
	unsigned long mmr = 0;
	struct bau_payload_queue_entry *msg;
	struct bau_payload_queue_entry *msg2;
	struct ptc_stats *stat;

	msg = mdp->msg;
	stat = bcp->statp;
	stat->d_retries++;
	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
		if (msg2 > mdp->va_queue_last)
			msg2 = mdp->va_queue_first;
		if (msg2 == msg)
			break;

		/* same conditions for cancellation as uv_do_reset */
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
		    (msg2->sw_ack_vector) && ((msg2->sw_ack_vector &
			msg->sw_ack_vector) == 0) &&
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
			slot2 = msg2 - mdp->va_queue_first;
			mmr = uv_read_local_mmr
				(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
			msg_res = msg2->sw_ack_vector;
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
				/*
				 * the resource timed out;
				 * make everyone ignore the cancelled message.
				 */
				msg2->canceled = 1;
				stat->d_canceled++;
				cancel_count++;
				uv_write_local_mmr(
				    UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
					(msg_res << UV_SW_ACK_NPENDING) |
					 msg_res);
			}
		}
	}
	if (!cancel_count)
		stat->d_nocanceled++;
}
/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
static void uv_bau_process_message(struct msg_desc *mdp,
				   struct bau_control *bcp)
{
	int msg_ack_count;
	short socket_ack_count = 0;
	struct ptc_stats *stat;
	struct bau_payload_queue_entry *msg;
	struct bau_control *smaster = bcp->socket_master;

	/*
	 * This must be a normal message, or retry of a normal message
	 */
	msg = mdp->msg;
	stat = bcp->statp;
	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		stat->d_alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		stat->d_onetlb++;
	}
	stat->d_requestee++;

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried.  That message is identified by sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
		uv_bau_process_retry_msg(mdp, bcp);

	/*
	 * This is a sw_ack message, so we have to reply to it.
	 * Count each responding cpu on the socket. This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
	socket_ack_count = atomic_add_short_return(1, (struct atomic_short *)
			&smaster->socket_acknowledge_count[mdp->msg_slot]);
	if (socket_ack_count == bcp->cpus_in_socket) {
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
		smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
		msg_ack_count = atomic_add_short_return(socket_ack_count,
				(struct atomic_short *)&msg->acknowledge_count);

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
			 */
			uv_reply_to_message(mdp, bcp);
		}
	}
}
/*
 * Determine the first cpu on a uvhub.
 */
static int uvhub_to_first_cpu(int uvhub)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return cpu;
	return -1;
}
/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
 * can be identified by their nonzero sw_ack_vector field.
 *
 * This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
 * sw_ack resource.
 */
static void
uv_do_reset(void *ptr)
{
	int i;
	int slot;
	unsigned long mmr;
	unsigned long msg_res;
	struct bau_control *bcp;
	struct reset_args *rap;
	struct bau_payload_queue_entry *msg;
	struct ptc_stats *stat;

	bcp = &per_cpu(bau_control, smp_processor_id());
	rap = (struct reset_args *)ptr;
	stat = bcp->statp;
	stat->d_resets++;
	/*
	 * We're looking for the given sender, and
	 * will free its sw_ack resource.
	 * If all cpu's finally responded after the timeout, its
	 * message 'replied_to' was set.
	 */
	for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
		/* uv_do_reset: same conditions for cancellation as
		   uv_bau_process_retry_msg() */
		if ((msg->replied_to == 0) &&
		    (msg->canceled == 0) &&
		    (msg->sending_cpu == rap->sender) &&
		    (msg->sw_ack_vector) &&
		    (msg->msg_type != MSG_NOOP)) {
			/*
			 * make everyone else ignore this message
			 */
			msg->canceled = 1;
			slot = msg - bcp->va_queue_first;
			/*
			 * only reset the resource if it is still pending
			 */
			mmr = uv_read_local_mmr
					(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
			msg_res = msg->sw_ack_vector;
			if (mmr & msg_res)
				uv_write_local_mmr(
				    UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
					(msg_res << UV_SW_ACK_NPENDING) |
					 msg_res);
		}
	}
}
/*
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
 */
static void uv_reset_with_ipi(struct bau_target_uvhubmask *distribution,
			      int sender)
{
	int uvhub;
	int cpu;
	cpumask_t mask;
	struct reset_args reset_args;

	reset_args.sender = sender;

	cpus_clear(mask);
	/* find a single cpu for each uvhub in this distribution mask */
	for (uvhub = 0;
		    uvhub < sizeof(struct bau_target_uvhubmask) * BITSPERBYTE;
		    uvhub++) {
		if (!bau_uvhub_isset(uvhub, distribution))
			continue;
		/* find a cpu for this uvhub */
		cpu = uvhub_to_first_cpu(uvhub);
		cpu_set(cpu, mask);
	}
	/* IPI all cpus; Preemption is already disabled */
	smp_call_function_many(&mask, uv_do_reset, (void *)&reset_args, 1);
}
static inline unsigned long
cycles_2_us(unsigned long long cyc)
{
	unsigned long long ns;
	unsigned long us;

	ns = (cyc * per_cpu(cyc2ns, smp_processor_id()))
						>> CYC2NS_SCALE_FACTOR;
	us = ns / 1000;
	return us;
}
/*
 * wait for all cpus on this hub to finish their sends and go quiet
 * leaves uvhub_quiesce set so that no new broadcasts are started by
 * bau_flush_send_and_wait()
 */
static inline void
quiesce_local_uvhub(struct bau_control *hmaster)
{
	atomic_add_short_return(1, (struct atomic_short *)
		 &hmaster->uvhub_quiesce);
}
/*
 * mark this quiet-requestor as done
 */
static inline void
end_uvhub_quiesce(struct bau_control *hmaster)
{
	atomic_add_short_return(-1, (struct atomic_short *)
		&hmaster->uvhub_quiesce);
}
/*
 * Wait for completion of a broadcast software ack message
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
 */
static int uv1_wait_completion(struct bau_desc *bau_desc,
	unsigned long mmr_offset, int right_shift, int this_cpu,
	struct bau_control *bcp, struct bau_control *smaster, long try)
{
	unsigned long descriptor_status;
	cycles_t ttime;
	struct ptc_stats *stat = bcp->statp;

	/* spin on the status MMR, waiting for it to go idle */
	while ((descriptor_status = (((unsigned long)
		uv_read_local_mmr(mmr_offset) >>
			right_shift) & UV_ACT_STATUS_MASK)) !=
			DESC_STATUS_IDLE) {
		/*
		 * Our software ack messages may be blocked because
		 * there are no swack resources available.  As long
		 * as none of them has timed out hardware will NACK
		 * our message and its state will stay IDLE.
		 */
		if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_status ==
					DESC_STATUS_DESTINATION_TIMEOUT) {
			stat->s_dtimeout++;
			ttime = get_cycles();

			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending.  In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttime - bcp->send_message) <
							timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}

			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_status is still BUSY
			 */
			cpu_relax();
		}
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}
static int uv2_wait_completion(struct bau_desc *bau_desc,
	unsigned long mmr_offset, int right_shift, int this_cpu,
	struct bau_control *bcp, struct bau_control *smaster, long try)
{
	unsigned long descriptor_status;
	unsigned long descriptor_status2;
	int cpu;
	cycles_t ttime;
	struct ptc_stats *stat = bcp->statp;

	/* UV2 has an extra bit of status */
	cpu = bcp->uvhub_cpu;
	/* spin on the status MMR, waiting for it to go idle */
	descriptor_status = (((unsigned long)(uv_read_local_mmr
		(mmr_offset)) >> right_shift) & UV_ACT_STATUS_MASK);
	descriptor_status2 = (((unsigned long)uv_read_local_mmr
		(UV2H_LB_BAU_SB_ACTIVATION_STATUS_2) >> cpu) & 0x1UL);
	descriptor_status = (descriptor_status << 1) |
		descriptor_status2;
	while (descriptor_status != UV2H_DESC_IDLE) {
		/*
		 * Our software ack messages may be blocked because
		 * there are no swack resources available.  As long
		 * as none of them has timed out hardware will NACK
		 * our message and its state will stay IDLE.
		 */
		if ((descriptor_status == UV2H_DESC_SOURCE_TIMEOUT) ||
		    (descriptor_status == UV2H_DESC_DEST_STRONG_NACK) ||
		    (descriptor_status == UV2H_DESC_DEST_PUT_ERR)) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_status == UV2H_DESC_DEST_TIMEOUT) {
			stat->s_dtimeout++;
			ttime = get_cycles();
			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending.  In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttime - bcp->send_message) <
							timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}
			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_status is still BUSY
			 */
			cpu_relax();
		}
		descriptor_status = (((unsigned long)(uv_read_local_mmr
			(mmr_offset)) >> right_shift) &
			UV_ACT_STATUS_MASK);
		descriptor_status2 = (((unsigned long)uv_read_local_mmr
			(UV2H_LB_BAU_SB_ACTIVATION_STATUS_2) >> cpu) &
			0x1UL);
		descriptor_status = (descriptor_status << 1) |
			descriptor_status2;
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}
static int uv_wait_completion(struct bau_desc *bau_desc,
	unsigned long mmr_offset, int right_shift, int this_cpu,
	struct bau_control *bcp, struct bau_control *smaster, long try)
{
	if (is_uv1_hub())
		return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
						this_cpu, bcp, smaster, try);
	else
		return uv2_wait_completion(bau_desc, mmr_offset, right_shift,
						this_cpu, bcp, smaster, try);
}
static inline cycles_t
sec_2_cycles(unsigned long sec)
{
	unsigned long ns;
	cycles_t cyc;

	ns = sec * 1000000000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}
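/*
 * Illustrative arithmetic for the two conversions above (the numbers are
 * assumptions for the sake of the example, not taken from this file):
 * assuming CYC2NS_SCALE_FACTOR is 10 and a 2 GHz clock, the per-cpu cyc2ns
 * factor is about 0.5 ns/cycle << 10 = 512.  Then
 *   cycles_2_us(2000000) ~= ((2000000 * 512) >> 10) / 1000 = 1000 us
 *   sec_2_cycles(1)      ~= (1000000000 << 10) / 512     = 2000000000 cycles
 */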
/*
 * conditionally add 1 to *v, unless *v is >= u
 * return 0 if we cannot add 1 to *v because it is >= u
 * return 1 if we can add 1 to *v because it is < u
 *
 * This is close to atomic_add_unless(), but this allows the 'u' value
 * to be lowered below the current 'v'.  atomic_add_unless can only stop
 * on equal.
 */
static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
{
	spin_lock(lock);
	if (atomic_read(v) >= u) {
		spin_unlock(lock);
		return 0;
	}
	atomic_inc(v);
	spin_unlock(lock);
	return 1;
}
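/*
 * uv_flush_send_and_wait() uses atomic_inc_unless_ge() to throttle the
 * number of in-flight descriptors: a sender may proceed only if
 * hmaster->active_descriptor_count can be bumped without reaching
 * hmaster->max_bau_concurrent.
 */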
/*
 * Our retries are blocked by all destination swack resources being
 * in use, and a timeout is pending. In that case hardware immediately
 * returns the ERROR that looks like a destination timeout.
 */
static void
destination_plugged(struct bau_desc *bau_desc, struct bau_control *bcp,
			struct bau_control *hmaster, struct ptc_stats *stat)
{
	udelay(bcp->plugged_delay);
	bcp->plugged_tries++;
	if (bcp->plugged_tries >= bcp->plugsb4reset) {
		bcp->plugged_tries = 0;
		quiesce_local_uvhub(hmaster);
		spin_lock(&hmaster->queue_lock);
		uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
		spin_unlock(&hmaster->queue_lock);
		end_uvhub_quiesce(hmaster);
		bcp->ipi_attempts++;
		stat->s_resets_plug++;
	}
}
static void
destination_timeout(struct bau_desc *bau_desc, struct bau_control *bcp,
			struct bau_control *hmaster, struct ptc_stats *stat)
{
	hmaster->max_bau_concurrent = 1;
	bcp->timeout_tries++;
	if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
		bcp->timeout_tries = 0;
		quiesce_local_uvhub(hmaster);
		spin_lock(&hmaster->queue_lock);
		uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
		spin_unlock(&hmaster->queue_lock);
		end_uvhub_quiesce(hmaster);
		bcp->ipi_attempts++;
		stat->s_resets_timeout++;
	}
}
/*
 * Completions are taking a very long time due to a congested numalink
 * network.
 */
static void
disable_for_congestion(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;

	/* let only one cpu do this disabling */
	spin_lock(&disable_lock);
	if (!baudisabled && bcp->period_requests &&
	    ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
		/* it becomes this cpu's job to turn on the use of the
		   BAU again */
		baudisabled = 1;
		bcp->set_bau_off = 1;
		bcp->set_bau_on_time = get_cycles() +
			sec_2_cycles(bcp->congested_period);
		stat->s_bau_disabled++;
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			tbcp->baudisabled = 1;
		}
	}
	spin_unlock(&disable_lock);
}
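/*
 * The matching re-enable happens in uv_flush_tlb_others(): the cpu that set
 * set_bau_off clears baudisabled on every cpu once set_bau_on_time (i.e.
 * congested_period seconds later) has passed.
 */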
/**
 * uv_flush_send_and_wait
 *
 * Send a broadcast and wait for it to complete.
 *
 * The flush_mask contains the cpus the broadcast is to be sent to including
 * cpus that are on the local uvhub.
 *
 * Returns 0 if all flushing represented in the mask was done.
 * Returns 1 if it gives up entirely and the original cpu mask is to be
 * returned to the kernel.
 */
int uv_flush_send_and_wait(struct bau_desc *bau_desc,
			   struct cpumask *flush_mask, struct bau_control *bcp)
{
	int right_shift;
	int completion_status = 0;
	int seq_number = 0;
	long try = 0;
	int cpu = bcp->uvhub_cpu;
	int this_cpu = bcp->cpu;
	unsigned long mmr_offset;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;
	cycles_t elapsed;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *smaster = bcp->socket_master;
	struct bau_control *hmaster = bcp->uvhub_master;

	if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
			&hmaster->active_descriptor_count,
			hmaster->max_bau_concurrent)) {
		stat->s_throttles++;
		do {
			cpu_relax();
		} while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
			&hmaster->active_descriptor_count,
			hmaster->max_bau_concurrent));
	}
	while (hmaster->uvhub_quiesce)
		cpu_relax();

	if (cpu < UV_CPUS_PER_ACT_STATUS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = cpu * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift =
		    ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
	}
	time1 = get_cycles();
	do {
		if (try == 0) {
			bau_desc->header.msg_type = MSG_REGULAR;
			seq_number = bcp->message_number++;
		} else {
			bau_desc->header.msg_type = MSG_RETRY;
			stat->s_retry_messages++;
		}
		bau_desc->header.sequence = seq_number;
		index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
			bcp->uvhub_cpu;
		bcp->send_message = get_cycles();
		uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
		try++;
		completion_status = uv_wait_completion(bau_desc, mmr_offset,
			right_shift, this_cpu, bcp, smaster, try);

		if (completion_status == FLUSH_RETRY_PLUGGED) {
			destination_plugged(bau_desc, bcp, hmaster, stat);
		} else if (completion_status == FLUSH_RETRY_TIMEOUT) {
			destination_timeout(bau_desc, bcp, hmaster, stat);
		}
		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
			bcp->ipi_attempts = 0;
			completion_status = FLUSH_GIVEUP;
			break;
		}
		cpu_relax();
	} while ((completion_status == FLUSH_RETRY_PLUGGED) ||
		 (completion_status == FLUSH_RETRY_TIMEOUT));
	time2 = get_cycles();
	bcp->plugged_tries = 0;
	bcp->timeout_tries = 0;
	if ((completion_status == FLUSH_COMPLETE) &&
	    (bcp->conseccompletes > bcp->complete_threshold) &&
	    (hmaster->max_bau_concurrent <
					hmaster->max_bau_concurrent_constant))
		hmaster->max_bau_concurrent++;
	while (hmaster->uvhub_quiesce)
		cpu_relax();
	atomic_dec(&hmaster->active_descriptor_count);
	elapsed = time2 - time1;
	stat->s_time += elapsed;
	if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
		bcp->period_requests++;
		bcp->period_time += elapsed;
		if ((elapsed > congested_cycles) &&
		    (bcp->period_requests > bcp->congested_reps)) {
			disable_for_congestion(bcp, stat);
		}
	}
	if (completion_status == FLUSH_COMPLETE && try > 1)
		stat->s_retriesok++;
	else if (completion_status == FLUSH_GIVEUP) {
		stat->s_giveup++;
		return 1;
	}
	return 0;
}
/**
 * uv_flush_tlb_others - globally purge translation cache of a virtual
 * address or all TLB's
 * @cpumask: mask of all cpu's in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct. This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done. The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
					  struct mm_struct *mm,
					  unsigned long va, unsigned int cpu)
{
	int locals = 0;
	int remotes = 0;
	int hubs = 0;
	int tcpu;
	int tpnode;
	struct bau_desc *bau_desc;
	struct cpumask *flush_mask;
	struct ptc_stats *stat;
	struct bau_control *bcp;
	struct bau_control *tbcp;
	struct hub_and_pnode *hpp;

	/* kernel was booted 'nobau' */
	if (nobau)
		return cpumask;

	bcp = &per_cpu(bau_control, cpu);
	stat = bcp->statp;

	/* bau was disabled due to slow response */
	if (bcp->baudisabled) {
		/* the cpu that disabled it must re-enable it */
		if (bcp->set_bau_off) {
			if (get_cycles() >= bcp->set_bau_on_time) {
				stat->s_bau_reenabled++;
				baudisabled = 0;
				for_each_present_cpu(tcpu) {
					tbcp = &per_cpu(bau_control, tcpu);
					tbcp->baudisabled = 0;
					tbcp->period_requests = 0;
					tbcp->period_time = 0;
				}
			}
		}
		return cpumask;
	}

	/*
	 * Each sending cpu has a per-cpu mask which it fills from the caller's
	 * cpu mask.  All cpus are converted to uvhubs and copied to the
	 * activation descriptor.
	 */
	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
	/* don't actually do a shootdown of the local cpu */
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

	if (cpu_isset(cpu, *cpumask))
		stat->s_ntargself++;

	bau_desc = bcp->descriptor_base;
	bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);

	for_each_cpu(tcpu, flush_mask) {
		/*
		 * The distribution vector is a bit map of pnodes, relative
		 * to the partition base pnode (and the partition base nasid
		 * in the header).
		 * Translate cpu to pnode and hub using an array stored
		 * in local memory.
		 */
		hpp = &bcp->socket_master->target_hub_and_pnode[tcpu];
		tpnode = hpp->pnode - bcp->partition_base_pnode;
		bau_uvhub_set(tpnode, &bau_desc->distribution);
		if (hpp->uvhub == bcp->uvhub)
			locals++;
		else
			remotes++;
	}
	if ((locals + remotes) == 0)
		return NULL;
	stat->s_requestor++;
	stat->s_ntargcpu += remotes + locals;
	stat->s_ntargremotes += remotes;
	stat->s_ntarglocals += locals;
	remotes = bau_uvhub_weight(&bau_desc->distribution);

	/* uvhub statistics */
	hubs = bau_uvhub_weight(&bau_desc->distribution);
	if (locals) {
		stat->s_ntarglocaluvhub++;
		stat->s_ntargremoteuvhub += (hubs - 1);
	} else
		stat->s_ntargremoteuvhub += hubs;
	stat->s_ntarguvhub += hubs;
	if (hubs >= 16)
		stat->s_ntarguvhub16++;
	else if (hubs >= 8)
		stat->s_ntarguvhub8++;
	else if (hubs >= 4)
		stat->s_ntarguvhub4++;
	else if (hubs >= 2)
		stat->s_ntarguvhub2++;
	else
		stat->s_ntarguvhub1++;

	bau_desc->payload.address = va;
	bau_desc->payload.sending_cpu = cpu;

	/*
	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
	 * or 1 if it gave up and the original cpumask should be returned.
	 */
	if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
		return NULL;
	else
		return cpumask;
}
/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 *
 * We received a broadcast assist message.
 *
 * Interrupts are disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this hub get this interrupt.
 * The last one to see it does the software ack.
 * (the resource will not be freed until noninterruptable cpus see this
 *  interrupt; hardware may timeout the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
	int count = 0;
	cycles_t time_start;
	struct bau_payload_queue_entry *msg;
	struct bau_control *bcp;
	struct ptc_stats *stat;
	struct msg_desc msgdesc;

	time_start = get_cycles();
	bcp = &per_cpu(bau_control, smp_processor_id());
	stat = bcp->statp;
	msgdesc.va_queue_first = bcp->va_queue_first;
	msgdesc.va_queue_last = bcp->va_queue_last;
	msg = bcp->bau_msg_head;
	while (msg->sw_ack_vector) {
		count++;
		msgdesc.msg_slot = msg - msgdesc.va_queue_first;
		msgdesc.sw_ack_slot = ffs(msg->sw_ack_vector) - 1;
		msgdesc.msg = msg;
		uv_bau_process_message(&msgdesc, bcp);
		msg++;
		if (msg > msgdesc.va_queue_last)
			msg = msgdesc.va_queue_first;
		bcp->bau_msg_head = msg;
	}
	stat->d_time += (get_cycles() - time_start);
	if (!count)
		stat->d_nomsg++;
	else if (count > 1)
		stat->d_multmsg++;
	ack_APIC_irq();
}
/*
 * Each target uvhub (i.e. a uvhub that has no cpu's) needs to have
 * shootdown message timeouts enabled.  The timeout does not cause
 * an interrupt, but causes an error message to be returned to
 * the sender.
 */
static void __init uv_enable_timeouts(void)
{
	int uvhub;
	int nuvhubs;
	int pnode;
	unsigned long mmr_image;

	nuvhubs = uv_num_possible_blades();

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		if (!uv_blade_nr_possible_cpus(uvhub))
			continue;

		pnode = uv_blade_to_pnode(uvhub);
		mmr_image =
		    uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
		/*
		 * Set the timeout period and then lock it in, in three
		 * steps; captures and locks in the period.
		 *
		 * To program the period, the SOFT_ACK_MODE must be off.
		 */
		mmr_image &= ~((unsigned long)1 <<
		    UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
		/*
		 * Set the 4-bit period.
		 */
		mmr_image &= ~((unsigned long)0xf <<
		    UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
		mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
		    UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
		/*
		 * Subsequent reversals of the timebase bit (3) cause an
		 * immediate timeout of one or all INTD resources as
		 * indicated in bits 2:0 (7 causes all of them to timeout).
		 */
		mmr_image |= ((unsigned long)1 <<
		    UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
		if (is_uv2_hub()) {
			mmr_image |= ((unsigned long)1 << UV2_LEG_SHFT);
			mmr_image |= ((unsigned long)1 << UV2_EXT_SHFT);
		}
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
	}
}
static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
{
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void uv_ptc_seq_stop(struct seq_file *file, void *data)
{
}
static inline unsigned long long
microsec_2_cycles(unsigned long microsec)
{
	unsigned long ns;
	unsigned long long cyc;

	ns = microsec * 1000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}
/*
 * Display the statistics thru /proc.
 * 'data' points to the cpu number
 */
static int uv_ptc_seq_show(struct seq_file *file, void *data)
{
	struct ptc_stats *stat;
	int cpu;

	cpu = *(loff_t *)data;

	if (!cpu) {
		seq_printf(file,
		"# cpu sent stime self locals remotes ncpus localhub ");
		seq_printf(file,
		"remotehub numuvhubs numuvhubs16 numuvhubs8 ");
		seq_printf(file,
		"numuvhubs4 numuvhubs2 numuvhubs1 dto ");
		seq_printf(file,
		"retries rok resetp resett giveup sto bz throt ");
		seq_printf(file,
		"sw_ack recv rtime all ");
		seq_printf(file,
		"one mult none retry canc nocan reset rcan ");
		seq_printf(file,
		"disable enable\n");
	}
	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
		stat = &per_cpu(ptcstats, cpu);
		/* source side statistics */
		seq_printf(file,
			"cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   cpu, stat->s_requestor, cycles_2_us(stat->s_time),
			   stat->s_ntargself, stat->s_ntarglocals,
			   stat->s_ntargremotes, stat->s_ntargcpu,
			   stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
			   stat->s_ntarguvhub, stat->s_ntarguvhub16);
		seq_printf(file, "%ld %ld %ld %ld %ld ",
			   stat->s_ntarguvhub8, stat->s_ntarguvhub4,
			   stat->s_ntarguvhub2, stat->s_ntarguvhub1,
			   stat->s_dtimeout);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
			   stat->s_retry_messages, stat->s_retriesok,
			   stat->s_resets_plug, stat->s_resets_timeout,
			   stat->s_giveup, stat->s_stimeout,
			   stat->s_busy, stat->s_throttles);

		/* destination side statistics */
		seq_printf(file,
			   "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
					UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
			   stat->d_requestee, cycles_2_us(stat->d_time),
			   stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
			   stat->d_nomsg, stat->d_retries, stat->d_canceled,
			   stat->d_nocanceled, stat->d_resets,
			   stat->d_rcanceled);
		seq_printf(file, "%ld %ld\n",
			stat->s_bau_disabled, stat->s_bau_reenabled);
	}

	return 0;
}
/*
 * Display the tunables thru debugfs
 */
static ssize_t tunables_read(struct file *file, char __user *userbuf,
						size_t count, loff_t *ppos)
{
	char *buf;
	int ret;

	buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
		"max_bau_concurrent plugged_delay plugsb4reset",
		"timeoutsb4reset ipi_reset_limit complete_threshold",
		"congested_response_us congested_reps congested_period",
		max_bau_concurrent, plugged_delay, plugsb4reset,
		timeoutsb4reset, ipi_reset_limit, complete_threshold,
		congested_response_us, congested_reps, congested_period);

	if (!buf)
		return -ENOMEM;

	ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
	kfree(buf);
	return ret;
}
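/*
 * Illustrative use only (the path is an assumption based on the usual
 * UV_BAU_TUNABLES_DIR/UV_BAU_TUNABLES_FILE names):
 *
 *   cat /sys/kernel/debug/sgi_uv/bau_tunables
 *
 * prints the nine tunable names on one line and their current values on
 * the next, in the order written by tunables_read() above.
 */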
/*
 * -1: reset the statistics
 *  0: display meaning of the statistics
 */
static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
				 size_t count, loff_t *data)
{
	int cpu;
	long input_arg;
	char optstr[64];
	struct ptc_stats *stat;

	if (count == 0 || count > sizeof(optstr))
		return -EINVAL;
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	optstr[count - 1] = '\0';
	if (strict_strtol(optstr, 10, &input_arg) < 0) {
		printk(KERN_DEBUG "%s is invalid\n", optstr);
		return -EINVAL;
	}

	if (input_arg == 0) {
		printk(KERN_DEBUG "# cpu: cpu number\n");
		printk(KERN_DEBUG "Sender statistics:\n");
		printk(KERN_DEBUG
		"sent: number of shootdown messages sent\n");
		printk(KERN_DEBUG
		"stime: time spent sending messages\n");
		printk(KERN_DEBUG
		"numuvhubs: number of hubs targeted with shootdown\n");
		printk(KERN_DEBUG
		"numuvhubs16: number times 16 or more hubs targeted\n");
		printk(KERN_DEBUG
		"numuvhubs8: number times 8 or more hubs targeted\n");
		printk(KERN_DEBUG
		"numuvhubs4: number times 4 or more hubs targeted\n");
		printk(KERN_DEBUG
		"numuvhubs2: number times 2 or more hubs targeted\n");
		printk(KERN_DEBUG
		"numuvhubs1: number times 1 hub targeted\n");
		printk(KERN_DEBUG
		"numcpus: number of cpus targeted with shootdown\n");
		printk(KERN_DEBUG
		"dto: number of destination timeouts\n");
		printk(KERN_DEBUG
		"retries: destination timeout retries sent\n");
		printk(KERN_DEBUG
		"rok: : destination timeouts successfully retried\n");
		printk(KERN_DEBUG
		"resetp: ipi-style resource resets for plugs\n");
		printk(KERN_DEBUG
		"resett: ipi-style resource resets for timeouts\n");
		printk(KERN_DEBUG
		"giveup: fall-backs to ipi-style shootdowns\n");
		printk(KERN_DEBUG
		"sto: number of source timeouts\n");
		printk(KERN_DEBUG
		"bz: number of stay-busy's\n");
		printk(KERN_DEBUG
		"throt: number times spun in throttle\n");
		printk(KERN_DEBUG "Destination side statistics:\n");
		printk(KERN_DEBUG
		"sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
		printk(KERN_DEBUG
		"recv: shootdown messages received\n");
		printk(KERN_DEBUG
		"rtime: time spent processing messages\n");
		printk(KERN_DEBUG
		"all: shootdown all-tlb messages\n");
		printk(KERN_DEBUG
		"one: shootdown one-tlb messages\n");
		printk(KERN_DEBUG
		"mult: interrupts that found multiple messages\n");
		printk(KERN_DEBUG
		"none: interrupts that found no messages\n");
		printk(KERN_DEBUG
		"retry: number of retry messages processed\n");
		printk(KERN_DEBUG
		"canc: number messages canceled by retries\n");
		printk(KERN_DEBUG
		"nocan: number retries that found nothing to cancel\n");
		printk(KERN_DEBUG
		"reset: number of ipi-style reset requests processed\n");
		printk(KERN_DEBUG
		"rcan: number messages canceled by reset requests\n");
		printk(KERN_DEBUG
		"disable: number times use of the BAU was disabled\n");
		printk(KERN_DEBUG
		"enable: number times use of the BAU was re-enabled\n");
	} else if (input_arg == -1) {
		for_each_present_cpu(cpu) {
			stat = &per_cpu(ptcstats, cpu);
			memset(stat, 0, sizeof(struct ptc_stats));
		}
	}

	return count;
}
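/*
 * Illustrative use only (the /proc name comes from UV_PTC_BASENAME,
 * normally "sgi_uv/ptc_statistics"):
 *
 *   echo 0  > /proc/sgi_uv/ptc_statistics   # log the meaning of each field
 *   echo -1 > /proc/sgi_uv/ptc_statistics   # zero every cpu's counters
 */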
static int local_atoi(const char *name)
{
	int val = 0;

	for (;; name++) {
		switch (*name) {
		case '0' ... '9':
			val = 10*val+(*name-'0');
			break;
		default:
			return val;
		}
	}
}
/*
 * 0 values reset them to defaults
 */
static ssize_t tunables_write(struct file *file, const char __user *user,
				 size_t count, loff_t *data)
{
	int cpu;
	int cnt = 0;
	int val;
	char *p;
	char *q;
	char instr[64];
	struct bau_control *bcp;

	if (count == 0 || count > sizeof(instr)-1)
		return -EINVAL;
	if (copy_from_user(instr, user, count))
		return -EFAULT;

	instr[count] = '\0';
	/* count the fields */
	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (; *p; p = q + strspn(q, WHITESPACE)) {
		q = p + strcspn(p, WHITESPACE);
		cnt++;
		if (q == p)
			break;
	}
	if (cnt != 9) {
		printk(KERN_INFO "bau tunable error: should be 9 numbers\n");
		return -EINVAL;
	}

	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
		q = p + strcspn(p, WHITESPACE);
		val = local_atoi(p);
		switch (cnt) {
		case 0:
			if (val == 0) {
				max_bau_concurrent = MAX_BAU_CONCURRENT;
				max_bau_concurrent_constant =
							MAX_BAU_CONCURRENT;
				continue;
			}
			bcp = &per_cpu(bau_control, smp_processor_id());
			if (val < 1 || val > bcp->cpus_in_uvhub) {
				printk(KERN_DEBUG
				"Error: BAU max concurrent %d is invalid\n",
				val);
				return -EINVAL;
			}
			max_bau_concurrent = val;
			max_bau_concurrent_constant = val;
			continue;
		case 1:
			if (val == 0)
				plugged_delay = PLUGGED_DELAY;
			else
				plugged_delay = val;
			continue;
		case 2:
			if (val == 0)
				plugsb4reset = PLUGSB4RESET;
			else
				plugsb4reset = val;
			continue;
		case 3:
			if (val == 0)
				timeoutsb4reset = TIMEOUTSB4RESET;
			else
				timeoutsb4reset = val;
			continue;
		case 4:
			if (val == 0)
				ipi_reset_limit = IPI_RESET_LIMIT;
			else
				ipi_reset_limit = val;
			continue;
		case 5:
			if (val == 0)
				complete_threshold = COMPLETE_THRESHOLD;
			else
				complete_threshold = val;
			continue;
		case 6:
			if (val == 0)
				congested_response_us = CONGESTED_RESPONSE_US;
			else
				congested_response_us = val;
			continue;
		case 7:
			if (val == 0)
				congested_reps = CONGESTED_REPS;
			else
				congested_reps = val;
			continue;
		case 8:
			if (val == 0)
				congested_period = CONGESTED_PERIOD;
			else
				congested_period = val;
			continue;
		}
		if (q == p)
			break;
	}
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->max_bau_concurrent = max_bau_concurrent;
		bcp->max_bau_concurrent_constant = max_bau_concurrent;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->congested_response_us = congested_response_us;
		bcp->congested_reps = congested_reps;
		bcp->congested_period = congested_period;
	}
	return count;
}
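/*
 * Illustrative write only: nine whitespace-separated integers in the order
 * listed by tunables_read() (a 0 restores that field's default).  The path
 * is an assumption based on the usual sgi_uv/bau_tunables debugfs names,
 * and the values below are arbitrary examples:
 *
 *   echo "2 10 3 2 10 20 200 10 30" > /sys/kernel/debug/sgi_uv/bau_tunables
 */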
static const struct seq_operations uv_ptc_seq_ops = {
	.start		= uv_ptc_seq_start,
	.next		= uv_ptc_seq_next,
	.stop		= uv_ptc_seq_stop,
	.show		= uv_ptc_seq_show
};

static int uv_ptc_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &uv_ptc_seq_ops);
}
static int tunables_open(struct inode *inode, struct file *file)
{
	return 0;
}
static const struct file_operations proc_uv_ptc_operations = {
	.open		= uv_ptc_proc_open,
	.read		= seq_read,
	.write		= uv_ptc_proc_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct file_operations tunables_fops = {
	.open		= tunables_open,
	.read		= tunables_read,
	.write		= tunables_write,
	.llseek		= default_llseek,
};
static int __init uv_ptc_init(void)
{
	struct proc_dir_entry *proc_uv_ptc;

	if (!is_uv_system())
		return 0;

	proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
				  &proc_uv_ptc_operations);
	if (!proc_uv_ptc) {
		printk(KERN_ERR "unable to create %s proc entry\n",
		       UV_PTC_BASENAME);
		return -EINVAL;
	}

	tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
	if (!tunables_dir) {
		printk(KERN_ERR "unable to create debugfs directory %s\n",
		       UV_BAU_TUNABLES_DIR);
		return -EINVAL;
	}
	tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
			tunables_dir, NULL, &tunables_fops);
	if (!tunables_file) {
		printk(KERN_ERR "unable to create debugfs file %s\n",
		       UV_BAU_TUNABLES_FILE);
		return -EINVAL;
	}
	return 0;
}
/*
 * Initialize the sending side's sending buffers.
 */
static void
uv_activation_descriptor_init(int node, int pnode, int base_pnode)
{
	int i;
	int cpu;
	unsigned long pa;
	unsigned long m;
	unsigned long n;
	struct bau_desc *bau_desc;
	struct bau_desc *bd2;
	struct bau_control *bcp;

	/*
	 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
	 * per cpu; and one per cpu on the uvhub (UV_ADP_SIZE)
	 */
	bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE
				* UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
	BUG_ON(!bau_desc);

	pa = uv_gpa(bau_desc); /* need the real nasid*/
	n = pa >> uv_nshift;
	m = pa & uv_mmask;

	/* the 14-bit pnode */
	uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
			      (n << UV_DESC_BASE_PNODE_SHIFT | m));
	/*
	 * Initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
	 * cpu even though we only use the first one; one descriptor can
	 * describe a broadcast to 256 uv hubs.
	 */
	for (i = 0, bd2 = bau_desc; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
		i++, bd2++) {
		memset(bd2, 0, sizeof(struct bau_desc));
		bd2->header.sw_ack_flag = 1;
		/*
		 * The base_dest_nasid set in the message header is the nasid
		 * of the first uvhub in the partition. The bit map will
		 * indicate destination pnode numbers relative to that base.
		 * They may not be consecutive if nasid striding is being used.
		 */
		bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
		bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
		bd2->header.command = UV_NET_ENDPOINT_INTD;
		bd2->header.int_both = 1;
		/*
		 * all others need to be set to zero:
		 *   fairness chaining multilevel count replied_to
		 */
	}
	for_each_present_cpu(cpu) {
		if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
			continue;
		bcp = &per_cpu(bau_control, cpu);
		bcp->descriptor_base = bau_desc;
	}
}
/*
 * initialize the destination side's receiving buffers
 * entered for each uvhub in the partition
 * - node is first node (kernel memory notion) on the uvhub
 * - pnode is the uvhub's physical identifier
 */
static void
uv_payload_queue_init(int node, int pnode)
{
	int pn;
	int cpu;
	char *cp;
	unsigned long pa;
	struct bau_payload_queue_entry *pqp;
	struct bau_payload_queue_entry *pqp_malloc;
	struct bau_control *bcp;

	pqp = kmalloc_node((DEST_Q_SIZE + 1)
			   * sizeof(struct bau_payload_queue_entry),
			   GFP_KERNEL, node);
	BUG_ON(!pqp);
	pqp_malloc = pqp;

	cp = (char *)pqp + 31;
	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);

	for_each_present_cpu(cpu) {
		if (pnode != uv_cpu_to_pnode(cpu))
			continue;
		/* for every cpu on this pnode: */
		bcp = &per_cpu(bau_control, cpu);
		bcp->va_queue_first = pqp;
		bcp->bau_msg_head = pqp;
		bcp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
	}
	/*
	 * need the pnode of where the memory was really allocated
	 */
	pa = uv_gpa(pqp);
	pn = pa >> uv_nshift;
	uv_write_global_mmr64(pnode,
			      UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
			      ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
			      uv_physnodeaddr(pqp));
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
			      uv_physnodeaddr(pqp));
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
			      (unsigned long)
			      uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1)));
	/* in effect, all msg_type's are set to MSG_NOOP */
	memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);
}
/*
 * Initialization of each UV hub's structures
 */
static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode)
{
	int node;
	int pnode;
	unsigned long apicid;

	node = uvhub_to_first_node(uvhub);
	pnode = uv_blade_to_pnode(uvhub);
	uv_activation_descriptor_init(node, pnode, base_pnode);
	uv_payload_queue_init(node, pnode);
	/*
	 * The below initialization can't be in firmware because the
	 * messaging IRQ will be determined by the OS.
	 */
	apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
	uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
			      ((apicid << 32) | vector));
}
/*
 * We will set BAU_MISC_CONTROL with a timeout period.
 * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
 * So the destination timeout period has to be calculated from them.
 */
static int
calculate_destination_timeout(void)
{
	unsigned long mmr_image;
	int mult1;
	int mult2;
	int index;
	int base;
	int ret;
	unsigned long ts_ns;

	if (is_uv1_hub()) {
		mult1 = UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD &
			BAU_MISC_CONTROL_MULT_MASK;
		mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
		index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
		mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
		mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
		base = timeout_base_ns[index];
		ts_ns = base * mult1 * mult2;
		ret = ts_ns / 1000;
	} else {
		/* 4 bits  0/1 for 10/80us, 3 bits of multiplier */
		mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
		mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
		if (mmr_image & ((unsigned long)1 << UV2_ACK_UNITS_SHFT))
			mult1 = 80;
		else
			mult1 = 10;
		base = mmr_image & UV2_ACK_MASK;
		ret = mult1 * base;
	}
	return ret;
}
/*
 * initialize the bau_control structure for each cpu
 */
static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode)
{
	int i;
	int cpu;
	int tcpu;
	int pnode;
	int uvhub;
	int have_hmaster;
	short socket = 0;
	unsigned short socket_mask;
	unsigned char *uvhub_mask;
	struct bau_control *bcp;
	struct uvhub_desc *bdp;
	struct socket_desc *sdp;
	struct bau_control *hmaster = NULL;
	struct bau_control *smaster = NULL;
	struct socket_desc {
		short num_cpus;
		short cpu_number[MAX_CPUS_PER_SOCKET];
	};
	struct uvhub_desc {
		unsigned short socket_mask;
		short num_cpus;
		short uvhub;
		short pnode;
		struct socket_desc socket[2];
	};
	struct uvhub_desc *uvhub_descs;

	timeout_us = calculate_destination_timeout();

	uvhub_descs = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
	memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
	uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		memset(bcp, 0, sizeof(struct bau_control));
		pnode = uv_cpu_hub_info(cpu)->pnode;
		if ((pnode - base_part_pnode) >= UV_DISTRIBUTION_SIZE) {
			printk(KERN_EMERG
				"cpu %d pnode %d-%d beyond %d; BAU disabled\n",
				cpu, pnode, base_part_pnode,
				UV_DISTRIBUTION_SIZE);
			return 1;
		}
		bcp->osnode = cpu_to_node(cpu);
		bcp->partition_base_pnode = uv_partition_base_pnode;
		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
		*(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
		bdp = &uvhub_descs[uvhub];
		bdp->num_cpus++;
		bdp->uvhub = uvhub;
		bdp->pnode = pnode;
		/* kludge: 'assuming' one node per socket, and assuming that
		   disabling a socket just leaves a gap in node numbers */
		socket = bcp->osnode & 1;
		bdp->socket_mask |= (1 << socket);
		sdp = &bdp->socket[socket];
		sdp->cpu_number[sdp->num_cpus] = cpu;
		sdp->num_cpus++;
		if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
			printk(KERN_EMERG "%d cpus per socket invalid\n",
				sdp->num_cpus);
			return 1;
		}
	}
	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
			continue;
		have_hmaster = 0;
		bdp = &uvhub_descs[uvhub];
		socket_mask = bdp->socket_mask;
		socket = 0;
		while (socket_mask) {
			if (!(socket_mask & 1))
				goto nextsocket;
			sdp = &bdp->socket[socket];
			for (i = 0; i < sdp->num_cpus; i++) {
				cpu = sdp->cpu_number[i];
				bcp = &per_cpu(bau_control, cpu);
				bcp->cpu = cpu;
				if (i == 0) {
					smaster = bcp;
					if (!have_hmaster) {
						have_hmaster++;
						hmaster = bcp;
					}
				}
				bcp->cpus_in_uvhub = bdp->num_cpus;
				bcp->cpus_in_socket = sdp->num_cpus;
				bcp->socket_master = smaster;
				bcp->uvhub = bdp->uvhub;
				bcp->uvhub_master = hmaster;
				bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->
						blade_processor_id;
				if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
					printk(KERN_EMERG
						"%d cpus per uvhub invalid\n",
						bcp->uvhub_cpu);
					return 1;
				}
			}
nextsocket:
			socket++;
			socket_mask = (socket_mask >> 1);
			/* each socket gets a local array of pnodes/hubs */
			bcp = smaster;
			bcp->target_hub_and_pnode = kmalloc_node(
				sizeof(struct hub_and_pnode) *
				num_possible_cpus(), GFP_KERNEL, bcp->osnode);
			memset(bcp->target_hub_and_pnode, 0,
				sizeof(struct hub_and_pnode) *
				num_possible_cpus());
			for_each_present_cpu(tcpu) {
				bcp->target_hub_and_pnode[tcpu].pnode =
					uv_cpu_hub_info(tcpu)->pnode;
				bcp->target_hub_and_pnode[tcpu].uvhub =
					uv_cpu_hub_info(tcpu)->numa_blade_id;
			}
		}
	}
	kfree(uvhub_descs);
	kfree(uvhub_mask);
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->baudisabled = 0;
		bcp->statp = &per_cpu(ptcstats, cpu);
		/* time interval to catch a hardware stay-busy bug */
		bcp->timeout_interval = microsec_2_cycles(2*timeout_us);
		bcp->max_bau_concurrent = max_bau_concurrent;
		bcp->max_bau_concurrent_constant = max_bau_concurrent;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->congested_response_us = congested_response_us;
		bcp->congested_reps = congested_reps;
		bcp->congested_period = congested_period;
	}
	return 0;
}
/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
	int uvhub;
	int pnode;
	int nuvhubs;
	int cur_cpu;
	int vector;
	unsigned long mmr;

	if (!is_uv_system())
		return 0;

	if (nobau)
		return 0;

	for_each_possible_cpu(cur_cpu)
		zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
				       GFP_KERNEL, cpu_to_node(cur_cpu));

	uv_nshift = uv_hub_info->m_val;
	uv_mmask = (1UL << uv_hub_info->m_val) - 1;
	nuvhubs = uv_num_possible_blades();
	spin_lock_init(&disable_lock);
	congested_cycles = microsec_2_cycles(congested_response_us);

	uv_partition_base_pnode = 0x7fffffff;
	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		if (uv_blade_nr_possible_cpus(uvhub) &&
			(uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
			uv_partition_base_pnode = uv_blade_to_pnode(uvhub);
	}

	if (uv_init_per_cpu(nuvhubs, uv_partition_base_pnode)) {
		nobau = 1;
		return 0;
	}

	vector = UV_BAU_MESSAGE;
	for_each_possible_blade(uvhub)
		if (uv_blade_nr_possible_cpus(uvhub))
			uv_init_uvhub(uvhub, vector, uv_partition_base_pnode);

	uv_enable_timeouts();
	alloc_intr_gate(vector, uv_bau_message_intr1);

	for_each_possible_blade(uvhub) {
		if (uv_blade_nr_possible_cpus(uvhub)) {
			pnode = uv_blade_to_pnode(uvhub);
			uv_write_global_mmr64(pnode,
					UVH_LB_BAU_SB_ACTIVATION_CONTROL,
					((unsigned long)1 << 63));
			mmr = 1; /* should be 1 to broadcast to both sockets */
			uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST,
						mmr);
		}
	}

	return 0;
}
core_initcall(uv_bau_init);
fs_initcall(uv_ptc_init);
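/*
 * Note on ordering: core_initcall runs uv_bau_init() at an earlier initcall
 * level than fs_initcall runs uv_ptc_init(), so the BAU itself is set up
 * before its /proc and debugfs interfaces are created.
 */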