/*
 *	SGI UltraViolet TLB flush routines.
 *
 *	(c) 2008 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
static struct bau_control	**uv_bau_table_bases __read_mostly;
static int			uv_bau_retry_limit __read_mostly;

/* position of pnode (which is nasid>>1): */
static int			uv_nshift __read_mostly;

static unsigned long		uv_mmask __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
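
/*
 * Overview (a summary of the code below, not part of the original header):
 * the sender builds a broadcast assist (BAU) activation descriptor, targets
 * a set of blades and writes the activation control MMR; each destination
 * hub raises a UV_BAU_MESSAGE interrupt, the cpus there drain the payload
 * queue, flush their TLBs and software-acknowledge the message; the sender
 * spins on the activation status and falls back to IPI-style shootdown if
 * it gives up.
 */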
/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static void uv_reply_to_message(int resource,
				struct bau_payload_queue_entry *msg,
				struct bau_msg_status *msp)
{
	unsigned long dw;

	/* writing these two bits to the alias mmr clears both the Pending
	   and the Timeout bit for this resource */
	dw = (1 << (resource + UV_SW_ACK_NPENDING)) | (1 << resource);
	msg->replied_to = 1;
	msg->sw_ack_vector = 0;
	if (msp)
		msp->seen_by.bits = 0;
	uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
}
/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
				   int msg_slot, int sw_ack_slot)
{
	int cpu;
	unsigned long this_cpu_mask;
	struct bau_msg_status *msp;

	msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
	cpu = uv_blade_processor_id();
	msg->number_of_cpus =
		uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
	this_cpu_mask = 1UL << cpu;
	if (msp->seen_by.bits & this_cpu_mask)
		return;
	atomic_or_long(&msp->seen_by.bits, this_cpu_mask);

	if (msg->replied_to == 1)
		return;

	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		__get_cpu_var(ptcstats).alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		__get_cpu_var(ptcstats).onetlb++;
	}

	__get_cpu_var(ptcstats).requestee++;

	atomic_inc_short(&msg->acknowledge_count);
	if (msg->number_of_cpus == msg->acknowledge_count)
		uv_reply_to_message(sw_ack_slot, msg, msp);
}
/*
 * Examine the payload queue on one distribution node to see
 * which messages have not been seen, and which cpu(s) have not seen them.
 *
 * Returns the number of cpu's that have not responded.
 */
static int uv_examine_destination(struct bau_control *bau_tablesp, int sender)
{
	struct bau_payload_queue_entry *msg;
	struct bau_msg_status *msp;
	int count = 0;
	int i;
	int j;

	for (msg = bau_tablesp->va_queue_first, i = 0; i < DEST_Q_SIZE;
	     msg++, i++) {
		if ((msg->sending_cpu == sender) && (!msg->replied_to)) {
			msp = bau_tablesp->msg_statuses + i;
			printk(KERN_DEBUG
			"blade %d: address:%#lx %d of %d, not cpu(s): ",
				i, msg->address, msg->acknowledge_count,
				msg->number_of_cpus);
			for (j = 0; j < msg->number_of_cpus; j++) {
				if (!((1L << j) & msp->seen_by.bits)) {
					count++;
					printk("%d ", j);
				}
			}
			printk("\n");
		}
	}
	return count;
}
/*
 * Examine the payload queue on all the distribution nodes to see
 * which messages have not been seen, and which cpu(s) have not seen them.
 *
 * Returns the number of cpu's that have not responded.
 */
static int uv_examine_destinations(struct bau_target_nodemask *distribution)
{
	int sender;
	int i;
	int count = 0;

	sender = smp_processor_id();
	for (i = 0; i < sizeof(struct bau_target_nodemask) * BITSPERBYTE; i++) {
		if (!bau_node_isset(i, distribution))
			continue;
		count += uv_examine_destination(uv_bau_table_bases[i], sender);
	}
	return count;
}
/*
 * wait for completion of a broadcast message
 *
 * return COMPLETE, RETRY or GIVEUP
 */
static int uv_wait_completion(struct bau_desc *bau_desc,
			      unsigned long mmr_offset, int right_shift)
{
	int exams = 0;
	long destination_timeouts = 0;
	long source_timeouts = 0;
	unsigned long descriptor_status;

	while ((descriptor_status = (((unsigned long)
		uv_read_local_mmr(mmr_offset) >>
			right_shift) & UV_ACT_STATUS_MASK)) !=
			DESC_STATUS_IDLE) {
		if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
			source_timeouts++;
			if (source_timeouts > SOURCE_TIMEOUT_LIMIT)
				source_timeouts = 0;
			__get_cpu_var(ptcstats).s_retry++;
			return FLUSH_RETRY;
		}
		/*
		 * spin here looking for progress at the destinations
		 */
		if (descriptor_status == DESC_STATUS_DESTINATION_TIMEOUT) {
			destination_timeouts++;
			if (destination_timeouts > DESTINATION_TIMEOUT_LIMIT) {
				/*
				 * returns number of cpus not responding
				 */
				if (uv_examine_destinations
					(&bau_desc->distribution) == 0) {
					__get_cpu_var(ptcstats).d_retry++;
					return FLUSH_RETRY;
				}
				exams++;
				if (exams >= uv_bau_retry_limit) {
					printk(KERN_DEBUG
						"uv_flush_tlb_others");
					printk("giving up on cpu %d\n",
						smp_processor_id());
					return FLUSH_GIVEUP;
				}
				/*
				 * delays can hang the simulator
				 */
				destination_timeouts = 0;
			}
		}
		cpu_relax();
	}
	return FLUSH_COMPLETE;
}
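
/*
 * Note on the activation status registers used below (inferred from the
 * UV_CPUS_PER_ACT_STATUS / UV_ACT_STATUS_SIZE arithmetic): each hub packs
 * the per-descriptor status fields of the first UV_CPUS_PER_ACT_STATUS
 * cpus into UVH_LB_BAU_SB_ACTIVATION_STATUS_0 and the remaining cpus into
 * ..._STATUS_1, so a sender locates its own field by register + shift.
 */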
/*
 * uv_flush_send_and_wait
 *
 * Send a broadcast and wait for a broadcast message to complete.
 *
 * The flush_mask contains the cpus the broadcast was sent to.
 *
 * Returns NULL if all remote flushing was done. The mask is zeroed.
 * Returns @flush_mask if some remote flushing remains to be done. The
 * mask will have some bits still set.
 */
const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
					     struct bau_desc *bau_desc,
					     struct cpumask *flush_mask)
{
	int completion_status = 0;
	int right_shift;
	int tries = 0;
	int bit;
	int blade;
	unsigned long index;
	unsigned long mmr_offset;
	cycles_t time1;
	cycles_t time2;

	if (cpu < UV_CPUS_PER_ACT_STATUS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = cpu * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift =
		    ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
	}
	time1 = get_cycles();
	do {
		tries++;
		index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
			cpu;
		uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
		completion_status = uv_wait_completion(bau_desc, mmr_offset,
						       right_shift);
	} while (completion_status == FLUSH_RETRY);
	time2 = get_cycles();
	__get_cpu_var(ptcstats).sflush += (time2 - time1);
	if (tries > 1)
		__get_cpu_var(ptcstats).retriesok++;

	if (completion_status == FLUSH_GIVEUP) {
		/*
		 * Cause the caller to do an IPI-style TLB shootdown on
		 * the cpu's, all of which are still in the mask.
		 */
		__get_cpu_var(ptcstats).ptc_i++;
		return flush_mask;
	}

	/*
	 * Success, so clear the remote cpu's from the mask so we don't
	 * use the IPI method of shootdown on them.
	 */
	for_each_cpu(bit, flush_mask) {
		blade = uv_cpu_to_blade_id(bit);
		if (blade == this_blade)
			continue;
		cpumask_clear_cpu(bit, flush_mask);
	}
	if (!cpumask_empty(flush_mask))
		return flush_mask;
	return NULL;
}
/**
 * uv_flush_tlb_others - globally purge translation cache of a virtual
 * address or all TLB's
 * @cpumask: mask of all cpu's in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct. This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a nodemask of the nodes containing
 * the cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done. The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
					  struct mm_struct *mm,
					  unsigned long va, unsigned int cpu)
{
	static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
	struct cpumask *flush_mask = &__get_cpu_var(flush_tlb_mask);
	int i;
	int bit;
	int blade;
	int uv_cpu;
	int this_blade;
	int locals = 0;
	struct bau_desc *bau_desc;

	WARN_ON(!in_atomic());

	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

	uv_cpu = uv_blade_processor_id();
	this_blade = uv_numa_blade_id();
	bau_desc = __get_cpu_var(bau_control).descriptor_base;
	bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;

	bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);

	i = 0;
	for_each_cpu(bit, flush_mask) {
		blade = uv_cpu_to_blade_id(bit);
		BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
		if (blade == this_blade) {
			locals++;
			continue;
		}
		bau_node_set(blade, &bau_desc->distribution);
		i++;
	}
	if (i == 0) {
		/*
		 * no off_node flushing; return status for local node
		 */
		if (locals)
			return flush_mask;
		else
			return NULL;
	}
	__get_cpu_var(ptcstats).requestor++;
	__get_cpu_var(ptcstats).ntargeted += i;

	bau_desc->payload.address = va;
	bau_desc->payload.sending_cpu = cpu;

	return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask);
}
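
/*
 * Note (describing the handler below): the payload queue is treated as a
 * circular buffer of DEST_Q_SIZE entries; bau_msg_head remembers where the
 * previous interrupt stopped, and scanning wraps from va_queue_last back
 * to va_queue_first.
 */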
/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 *
 * We received a broadcast assist message.
 *
 * Interrupts may have been disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this node get this interrupt.
 * The last one to see it does the s/w ack.
 * (the resource will not be freed until noninterruptable cpus see this
 *  interrupt; hardware will timeout the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
	struct bau_payload_queue_entry *va_queue_first;
	struct bau_payload_queue_entry *va_queue_last;
	struct bau_payload_queue_entry *msg;
	struct pt_regs *old_regs = set_irq_regs(regs);
	cycles_t time1;
	cycles_t time2;
	int msg_slot;
	int sw_ack_slot;
	int fw;
	int count = 0;
	unsigned long local_pnode;

	/* standard interrupt entry housekeeping */
	ack_APIC_irq();
	exit_idle();
	irq_enter();

	time1 = get_cycles();

	local_pnode = uv_blade_to_pnode(uv_numa_blade_id());

	va_queue_first = __get_cpu_var(bau_control).va_queue_first;
	va_queue_last = __get_cpu_var(bau_control).va_queue_last;

	msg = __get_cpu_var(bau_control).bau_msg_head;
	while (msg->sw_ack_vector) {
		count++;
		fw = msg->sw_ack_vector;
		msg_slot = msg - va_queue_first;
		sw_ack_slot = ffs(fw) - 1;

		uv_bau_process_message(msg, msg_slot, sw_ack_slot);

		msg++;
		if (msg > va_queue_last)
			msg = va_queue_first;
		__get_cpu_var(bau_control).bau_msg_head = msg;
	}
	if (!count)
		__get_cpu_var(ptcstats).nomsg++;
	else if (count > 1)
		__get_cpu_var(ptcstats).multmsg++;

	time2 = get_cycles();
	__get_cpu_var(ptcstats).dflush += (time2 - time1);

	irq_exit();
	set_irq_regs(old_regs);
}
static void uv_enable_timeouts(void)
{
	int i;
	int blade;
	int last_blade;
	int pnode;
	int cur_cpu = 0;
	unsigned long apicid;

	last_blade = -1;
	for_each_online_node(i) {
		blade = uv_node_to_blade_id(i);
		if (blade == last_blade)
			continue;
		last_blade = blade;
		apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
		pnode = uv_blade_to_pnode(blade);
		cur_cpu += uv_blade_nr_possible_cpus(i);
		/* note: the values computed in this per-blade walk are not
		   yet used to program the hub here */
	}
}
static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
{
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void uv_ptc_seq_stop(struct seq_file *file, void *data)
{
}
/*
 * Display the statistics thru /proc
 * data points to the cpu number
 */
static int uv_ptc_seq_show(struct seq_file *file, void *data)
{
	struct ptc_stats *stat;
	int cpu;

	cpu = *(loff_t *)data;

	if (!cpu) {
		seq_printf(file,
		"# cpu requestor requestee one all sretry dretry ptc_i ");
		seq_printf(file,
		"sw_ack sflush dflush sok dnomsg dmult starget\n");
	}
	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
		stat = &per_cpu(ptcstats, cpu);
		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld ",
			   cpu, stat->requestor,
			   stat->requestee, stat->onetlb, stat->alltlb,
			   stat->s_retry, stat->d_retry, stat->ptc_i);
		seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
			   uv_read_global_mmr64(uv_blade_to_pnode
					(uv_cpu_to_blade_id(cpu)),
					UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
			   stat->sflush, stat->dflush,
			   stat->retriesok, stat->nomsg,
			   stat->multmsg, stat->ntargeted);
	}

	return 0;
}
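
/*
 * Example usage of the /proc interface (file name comes from
 * UV_PTC_BASENAME, registered in uv_ptc_init() below):
 *   cat /proc/<UV_PTC_BASENAME>       - dump one line of counters per cpu
 *   echo 0 > /proc/<UV_PTC_BASENAME>  - log the meaning of each counter
 *   echo N > /proc/<UV_PTC_BASENAME>  - set the timeout retry limit to N
 */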
/*
 *  0: display meaning of the statistics
 * >0: retry limit
 */
static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
				 size_t count, loff_t *data)
{
	unsigned long newmode;
	char optstr[64];

	if (count == 0 || count > sizeof(optstr))
		return -EINVAL;
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	optstr[count - 1] = '\0';
	if (strict_strtoul(optstr, 10, &newmode) < 0) {
		printk(KERN_DEBUG "%s is invalid\n", optstr);
		return -EINVAL;
	}

	if (newmode == 0) {
		printk(KERN_DEBUG "# cpu: cpu number\n");
		printk(KERN_DEBUG
		"requestor: times this cpu was the flush requestor\n");
		printk(KERN_DEBUG
		"requestee: times this cpu was requested to flush its TLBs\n");
		printk(KERN_DEBUG
		"one: times requested to flush a single address\n");
		printk(KERN_DEBUG
		"all: times requested to flush all TLB's\n");
		printk(KERN_DEBUG
		"sretry: number of retries of source-side timeouts\n");
		printk(KERN_DEBUG
		"dretry: number of retries of destination-side timeouts\n");
		printk(KERN_DEBUG
		"ptc_i: times UV fell through to IPI-style flushes\n");
		printk(KERN_DEBUG
		"sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
		printk(KERN_DEBUG
		"sflush_us: cycles spent in uv_flush_tlb_others()\n");
		printk(KERN_DEBUG
		"dflush_us: cycles spent in handling flush requests\n");
		printk(KERN_DEBUG "sok: successes on retry\n");
		printk(KERN_DEBUG "dnomsg: interrupts with no message\n");
		printk(KERN_DEBUG
		"dmult: interrupts with multiple messages\n");
		printk(KERN_DEBUG "starget: nodes targeted\n");
	} else {
		uv_bau_retry_limit = newmode;
		printk(KERN_DEBUG "timeout retry limit:%d\n",
		       uv_bau_retry_limit);
	}

	return count;
}
static const struct seq_operations uv_ptc_seq_ops = {
	.start		= uv_ptc_seq_start,
	.next		= uv_ptc_seq_next,
	.stop		= uv_ptc_seq_stop,
	.show		= uv_ptc_seq_show
};
static int uv_ptc_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &uv_ptc_seq_ops);
}
static const struct file_operations proc_uv_ptc_operations = {
	.open		= uv_ptc_proc_open,
	.read		= seq_read,
	.write		= uv_ptc_proc_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static int __init
uv_ptc_init(void)
{
	struct proc_dir_entry *proc_uv_ptc;

	if (!is_uv_system())
		return 0;

	proc_uv_ptc = create_proc_entry(UV_PTC_BASENAME, 0444, NULL);
	if (!proc_uv_ptc) {
		printk(KERN_ERR "unable to create %s proc entry\n",
		       UV_PTC_BASENAME);
		return -EINVAL;
	}
	proc_uv_ptc->proc_fops = &proc_uv_ptc_operations;
	return 0;
}
/*
 * begin the initialization of the per-blade control structures
 */
static struct bau_control * __init
uv_table_bases_init(int blade, int node)
{
	int i;
	struct bau_msg_status *msp;
	struct bau_control *bau_tabp;

	bau_tabp =
	    kmalloc_node(sizeof(struct bau_control), GFP_KERNEL, node);
	BUG_ON(!bau_tabp);

	bau_tabp->msg_statuses =
	    kmalloc_node(sizeof(struct bau_msg_status) *
			 DEST_Q_SIZE, GFP_KERNEL, node);
	BUG_ON(!bau_tabp->msg_statuses);

	for (i = 0, msp = bau_tabp->msg_statuses; i < DEST_Q_SIZE; i++, msp++)
		bau_cpubits_clear(&msp->seen_by, (int)
				  uv_blade_nr_possible_cpus(blade));

	uv_bau_table_bases[blade] = bau_tabp;

	return bau_tabp;
}
/*
 * finish the initialization of the per-blade control structures
 */
static void __init
uv_table_bases_finish(int blade, int node, int cur_cpu,
		      struct bau_control *bau_tablesp,
		      struct bau_desc *adp)
{
	int i;
	struct bau_control *bcp;

	for (i = cur_cpu; i < cur_cpu + uv_blade_nr_possible_cpus(blade); i++) {
		bcp = (struct bau_control *)&per_cpu(bau_control, i);

		bcp->bau_msg_head	= bau_tablesp->va_queue_first;
		bcp->va_queue_first	= bau_tablesp->va_queue_first;
		bcp->va_queue_last	= bau_tablesp->va_queue_last;
		bcp->msg_statuses	= bau_tablesp->msg_statuses;
		bcp->descriptor_base	= adp;
	}
}
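
/*
 * Note on the descriptor area set up below (sizes inferred from the code):
 * one 16384-byte block per blade holds UV_ACTIVATION_DESCRIPTOR_SIZE
 * struct bau_desc entries; uv_flush_tlb_others() indexes into it with
 * UV_ITEMS_PER_DESCRIPTOR * uv_blade_processor_id(), so each cpu on the
 * blade works on its own group of descriptors.
 */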
/*
 * initialize the sending side's sending buffers
 */
static struct bau_desc * __init
uv_activation_descriptor_init(int node, int pnode)
{
	int i;
	unsigned long pa;
	unsigned long m;
	unsigned long n;
	unsigned long mmr_image;
	struct bau_desc *adp;
	struct bau_desc *ad2;

	adp = (struct bau_desc *)
	    kmalloc_node(16384, GFP_KERNEL, node);
	BUG_ON(!adp);

	pa = __pa((unsigned long)adp);
	n = pa >> uv_nshift;
	m = pa & uv_mmask;

	mmr_image = uv_read_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE);
	if (mmr_image) {
		uv_write_global_mmr64(pnode, (unsigned long)
				      UVH_LB_BAU_SB_DESCRIPTOR_BASE,
				      (n << UV_DESC_BASE_PNODE_SHIFT | m));
	}

	for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
		memset(ad2, 0, sizeof(struct bau_desc));
		ad2->header.sw_ack_flag = 1;
		ad2->header.base_dest_nodeid =
		    uv_blade_to_pnode(uv_cpu_to_blade_id(0));
		ad2->header.command = UV_NET_ENDPOINT_INTD;
		ad2->header.int_both = 1;
		/*
		 * all others need to be set to zero:
		 * fairness chaining multilevel count replied_to
		 */
	}
	return adp;
}
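
/*
 * Note on the payload queue set up below: the queue base programmed into
 * the hub appears to require 32-byte alignment, which is why the code
 * over-allocates by one entry and then rounds the pointer up to the next
 * multiple of 32 before using it.
 */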
/*
 * initialize the destination side's receiving buffers
 */
static struct bau_payload_queue_entry * __init
uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
{
	char *cp;
	struct bau_payload_queue_entry *pqp;

	pqp = (struct bau_payload_queue_entry *) kmalloc_node(
		(DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
		GFP_KERNEL, node);
	BUG_ON(!pqp);

	cp = (char *)pqp + 31;
	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
	bau_tablesp->va_queue_first = pqp;
	uv_write_global_mmr64(pnode,
			      UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
			      ((unsigned long)pnode <<
			       UV_PAYLOADQ_PNODE_SHIFT) |
			      uv_physnodeaddr(pqp));
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
			      uv_physnodeaddr(pqp));
	bau_tablesp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
			      (unsigned long)
			      uv_physnodeaddr(bau_tablesp->va_queue_last));
	memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);

	return pqp;
}

/*
 * Initialization of each UV blade's structures
 */
static int __init
uv_init_blade(int blade, int node, int cur_cpu)
{
	int pnode;
	unsigned long pa;
	unsigned long apicid;
	struct bau_desc *adp;
	struct bau_payload_queue_entry *pqp;
	struct bau_control *bau_tablesp;

	bau_tablesp = uv_table_bases_init(blade, node);
	pnode = uv_blade_to_pnode(blade);
	adp = uv_activation_descriptor_init(node, pnode);
	pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
	uv_table_bases_finish(blade, node, cur_cpu, bau_tablesp, adp);
	/*
	 * the below initialization can't be in firmware because the
	 * messaging IRQ will be determined by the OS
	 */
	apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
	pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
	if ((pa & 0xff) != UV_BAU_MESSAGE) {
		uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
				      ((apicid << 32) | UV_BAU_MESSAGE));
	}
	return 0;
}

/*
 * Initialization of BAU-related structures
 */
static int __init
uv_bau_init(void)
{
	int blade;
	int node;
	int nblades;
	int last_blade;
	int cur_cpu = 0;

	if (!is_uv_system())
		return 0;

	uv_bau_retry_limit = 1;
	uv_nshift = uv_hub_info->n_val;
	uv_mmask = (1UL << uv_hub_info->n_val) - 1;

	/* count the distinct blades among the online nodes */
	nblades = 0;
	last_blade = -1;
	for_each_online_node(node) {
		blade = uv_node_to_blade_id(node);
		if (blade == last_blade)
			continue;
		last_blade = blade;
		nblades++;
	}
	uv_bau_table_bases = (struct bau_control **)
	    kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
	BUG_ON(!uv_bau_table_bases);

	last_blade = -1;
	for_each_online_node(node) {
		blade = uv_node_to_blade_id(node);
		if (blade == last_blade)
			continue;
		last_blade = blade;
		uv_init_blade(blade, node, cur_cpu);
		cur_cpu += uv_blade_nr_possible_cpus(blade);
	}
	alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
	uv_enable_timeouts();

	return 0;
}
__initcall(uv_bau_init);
__initcall(uv_ptc_init);