/*
 * SN Platform GRU Driver
 *
 *              KERNEL SERVICES THAT USE THE GRU
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
#include "grukservices.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>
/*
 * Kernel GRU Usage
 *
 * The following is an interim algorithm for management of kernel GRU
 * resources. This will likely be replaced when we better understand the
 * kernel/user requirements.
 *
 * Blade percpu resources reserved for kernel use. These resources are
 * reserved whenever the kernel context for the blade is loaded. Note
 * that the kernel context is not guaranteed to be always available. It is
 * loaded on demand & can be stolen by a user if the user demand exceeds the
 * kernel demand. The kernel can always reload the kernel context but
 * a SLEEP may be required!!!
 *
 * Async Overview:
 *
 *      Each blade has one "kernel context" that owns GRU kernel resources
 *      located on the blade. Kernel drivers use GRU resources in this context
 *      for sending messages, zeroing memory, etc.
 *
 *      The kernel context is dynamically loaded on demand. If it is not in
 *      use by the kernel, the kernel context can be unloaded & given to a user.
 *      The kernel context will be reloaded when needed. This may require that
 *      a context be stolen from a user.
 *              NOTE: frequent unloading/reloading of the kernel context is
 *              expensive. We are depending on batch schedulers, cpusets, sane
 *              drivers or some other mechanism to prevent the need for frequent
 *              stealing/reloading.
 *
 *      The kernel context consists of two parts:
 *              - 1 CB & a few DSRs that are reserved for each cpu on the blade.
 *                Each cpu has its own private resources & does not share them
 *                with other cpus. These resources are used serially, i.e.,
 *                locked, used & unlocked on each call to a function in
 *                grukservices.
 *                      (Now that we have dynamic loading of kernel contexts, I
 *                      may rethink this & allow sharing between cpus....)
 *
 *              - Additional resources can be reserved long term & used directly
 *                by UV drivers located in the kernel. Drivers using these GRU
 *                resources can use asynchronous GRU instructions that send
 *                interrupts on completion.
 *                      - these resources must be explicitly locked/unlocked
 *                      - locked resources prevent (obviously) the kernel
 *                        context from being unloaded.
 *                      - drivers using these resources directly issue their own
 *                        GRU instruction and must wait/check completion.
 *
 *                When these resources are reserved, the caller can optionally
 *                associate a wait_queue with the resources and use asynchronous
 *                GRU instructions. When an async GRU instruction completes, the
 *                driver will do a wakeup on the event.
 */
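
/*
 * Illustrative sketch, not part of the driver: the per-cpu resource path
 * described above is used by the kservices in this file roughly as follows
 * ("bytes" here is a hypothetical caller-chosen DSR size):
 *
 *      void *cb, *dsr;
 *
 *      if (gru_get_cpu_resources(bytes, &cb, &dsr))
 *              return MQE_BUG_NO_RESOURCES;
 *      ... issue a GRU instruction on cb, using dsr as the operand buffer ...
 *      ... gru_wait(cb) for completion ...
 *      gru_free_cpu_resources(cb, dsr);
 */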
#define ASYNC_HAN_TO_BID(h)	((h) - 1)
#define ASYNC_BID_TO_HAN(b)	((b) + 1)
#define ASYNC_HAN_TO_BS(h)	gru_base[ASYNC_HAN_TO_BID(h)]
#define KCB_TO_GID(cb)		((cb - gru_start_vaddr) /		\
					(GRU_SIZE * GRU_CHIPLETS_PER_BLADE))
#define KCB_TO_BS(cb)		gru_base[KCB_TO_GID(cb)]
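
/*
 * An async handle is just the blade id biased by 1 so that a valid handle
 * is always non-zero, e.g. ASYNC_BID_TO_HAN(0) == 1 and
 * ASYNC_HAN_TO_BID(1) == 0. Handle 0 means "no reservation" (see
 * gru_reserve_async_resources() below).
 */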
#define GRU_NUM_KERNEL_CBR	1
#define GRU_NUM_KERNEL_DSR_BYTES 256
#define GRU_NUM_KERNEL_DSR_CL	(GRU_NUM_KERNEL_DSR_BYTES /		\
					GRU_CACHE_LINE_BYTES)

/* GRU instruction attributes for all instructions */
#define IMA			IMA_CB_DELAY
/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */
#define __gru_cacheline_aligned__				\
	__attribute__((__aligned__(GRU_CACHE_LINE_BYTES)))

#define MAGIC	0x1234567887654321UL

/* Default retry count for GRU errors on kernel instructions */
#define EXCEPTION_RETRY_LIMIT	3
/* Status of message queue sections */
#define MQS_EMPTY		0
#define MQS_FULL		1
#define MQS_NOOP		2

/*----------------- RESOURCE MANAGEMENT -------------------------------------*/
/* optimized for x86_64 */
struct message_queue {
	union gru_mesqhead	head __gru_cacheline_aligned__;	/* CL 0 */
	int			qlines;				/* DW 1 */
	long			hstatus[2];
	void			*next __gru_cacheline_aligned__;/* CL 1 */
	void			*limit;
	void			*start;
	void			*start2;
	char			data ____cacheline_aligned;	/* CL 2 */
};
/* First word in every message - used by mesq interface */
struct message_header {
	char	present;
	char	present2;
	char	lines;
	char	fill;
};

#define HSTATUS(mq, h)	((mq) + offsetof(struct message_queue, hstatus[h]))
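
/*
 * HSTATUS(mq, h) yields the GPA of mq->hstatus[h]. Senders issue AMOs
 * against these two words (see send_message_queue_full() below) to
 * serialize switching between the first and second halves of the queue.
 */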
/*
 * Reload the blade's kernel context into a GRU chiplet. Called holding
 * the bs_kgts_sema for READ. Will steal user contexts if necessary.
 */
static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
{
	struct gru_state *gru;
	struct gru_thread_state *kgts;
	void *vaddr;
	int ctxnum, ncpus;

	up_read(&bs->bs_kgts_sema);
	down_write(&bs->bs_kgts_sema);

	if (!bs->bs_kgts) {
		bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0);
		bs->bs_kgts->ts_user_blade_id = blade_id;
	}
	kgts = bs->bs_kgts;

	if (!kgts->ts_gru) {
		STAT(load_kernel_context);
		ncpus = uv_blade_nr_possible_cpus(blade_id);
		kgts->ts_cbr_au_count = GRU_CB_COUNT_TO_AU(
			GRU_NUM_KERNEL_CBR * ncpus + bs->bs_async_cbrs);
		kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
			GRU_NUM_KERNEL_DSR_BYTES * ncpus +
				bs->bs_async_dsr_bytes);
		while (!gru_assign_gru_context(kgts)) {
			msleep(1);
			gru_steal_context(kgts);
		}
		gru_load_context(kgts);
		gru = bs->bs_kgts->ts_gru;
		vaddr = gru->gs_gru_base_vaddr;
		ctxnum = kgts->ts_ctxnum;
		bs->kernel_cb = get_gseg_base_address_cb(vaddr, ctxnum, 0);
		bs->kernel_dsr = get_gseg_base_address_ds(vaddr, ctxnum, 0);
	}
	downgrade_write(&bs->bs_kgts_sema);
}
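
/*
 * Note on the locking above: callers enter holding bs_kgts_sema for READ.
 * To (re)load the context, the read lock is dropped, the write lock is
 * taken to serialize the load, and the lock is then downgraded back to
 * READ so the caller still holds it on return.
 */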
/*
 * Free all kernel contexts that are not currently in use.
 *   Returns 0 if all freed, else number of in-use contexts.
 */
static int gru_free_kernel_contexts(void)
{
	struct gru_blade_state *bs;
	struct gru_thread_state *kgts;
	int bid, ret = 0;

	for (bid = 0; bid < GRU_MAX_BLADES; bid++) {
		bs = gru_base[bid];
		if (!bs)
			continue;

		/* Ignore busy contexts. Don't want to block here. */
		if (down_write_trylock(&bs->bs_kgts_sema)) {
			kgts = bs->bs_kgts;
			if (kgts && kgts->ts_gru)
				gru_unload_context(kgts, 0);
			bs->bs_kgts = NULL;
			up_write(&bs->bs_kgts_sema);
			kfree(kgts);
		} else {
			ret++;
		}
	}
	return ret;
}
/*
 * Lock & load the kernel context for the specified blade.
 */
static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
{
	struct gru_blade_state *bs;

	STAT(lock_kernel_context);
	bs = gru_base[blade_id];

	down_read(&bs->bs_kgts_sema);
	if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
		gru_load_kernel_context(bs, blade_id);
	return bs;
}
/*
 * Unlock the kernel context for the specified blade. Context is not
 * unloaded but may be stolen before next use.
 */
static void gru_unlock_kernel_context(int blade_id)
{
	struct gru_blade_state *bs;

	bs = gru_base[blade_id];
	up_read(&bs->bs_kgts_sema);
	STAT(unlock_kernel_context);
}
/*
 * Reserve & get pointers to the DSR/CBRs reserved for the current cpu.
 *      - returns with preemption disabled
 */
static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
{
	struct gru_blade_state *bs;
	int lcpu;

	BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
	preempt_disable();
	bs = gru_lock_kernel_context(uv_numa_blade_id());
	lcpu = uv_blade_processor_id();
	*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
	*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
	return 0;
}
/*
 * Free the current cpu's reserved DSR/CBR resources.
 */
static void gru_free_cpu_resources(void *cb, void *dsr)
{
	gru_unlock_kernel_context(uv_numa_blade_id());
	preempt_enable();
}
/*
 * Reserve GRU resources to be used asynchronously.
 *   Note: currently supports only 1 reservation per blade.
 *
 *      input:
 *              blade_id  - blade on which resources should be reserved
 *              cbrs      - number of CBRs
 *              dsr_bytes - number of DSR bytes needed
 *      output:
 *              handle to identify resource
 *              (0 = async resources already reserved)
 */
unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes,
			struct completion *cmp)
{
	struct gru_blade_state *bs;
	struct gru_thread_state *kgts;
	int ret = 0;

	bs = gru_base[blade_id];

	down_write(&bs->bs_kgts_sema);

	/* Verify no resources already reserved */
	if (bs->bs_async_dsr_bytes + bs->bs_async_cbrs)
		goto done;
	bs->bs_async_dsr_bytes = dsr_bytes;
	bs->bs_async_cbrs = cbrs;
	bs->bs_async_wq = cmp;
	kgts = bs->bs_kgts;

	/* Resources changed. Unload context if already loaded */
	if (kgts && kgts->ts_gru)
		gru_unload_context(kgts, 0);
	ret = ASYNC_BID_TO_HAN(blade_id);

done:
	up_write(&bs->bs_kgts_sema);
	return ret;
}
/*
 * Release async resources previously reserved.
 *
 *      input:
 *              han - handle to identify resources
 */
void gru_release_async_resources(unsigned long han)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);

	down_write(&bs->bs_kgts_sema);
	bs->bs_async_dsr_bytes = 0;
	bs->bs_async_cbrs = 0;
	bs->bs_async_wq = NULL;
	up_write(&bs->bs_kgts_sema);
}
/*
 * Wait for async GRU instructions to complete.
 *
 *      input:
 *              han - handle to identify resources
 */
void gru_wait_async_cbr(unsigned long han)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);

	wait_for_completion(bs->bs_async_wq);
	mb();
}
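
/*
 * Illustrative sketch, not part of the driver: the async resource API,
 * following the pattern used by quicktest2() below:
 *
 *      static DECLARE_COMPLETION(cmp);
 *      unsigned long han;
 *      void *cb;
 *
 *      han = gru_reserve_async_resources(blade_id, ncbrs, 0, &cmp);
 *      gru_lock_async_resource(han, &cb, NULL);
 *      ... issue instructions on cb with IMA_INTERRUPT ...
 *      gru_wait_async_cbr(han);                - wait for an interrupt
 *      gru_unlock_async_resource(han);
 *      gru_release_async_resources(han);
 */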
/*
 * Lock previously reserved async GRU resources
 *
 *      input:
 *              han - handle to identify resources
 *      output:
 *              cb  - pointer to first CBR
 *              dsr - pointer to first DSR
 */
void gru_lock_async_resource(unsigned long han, void **cb, void **dsr)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
	int blade_id = ASYNC_HAN_TO_BID(han);
	int ncpus;

	gru_lock_kernel_context(blade_id);
	ncpus = uv_blade_nr_possible_cpus(blade_id);
	if (cb)
		*cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE;
	if (dsr)
		*dsr = bs->kernel_dsr + ncpus * GRU_NUM_KERNEL_DSR_BYTES;
}
/*
 * Unlock previously reserved async GRU resources
 *
 *      input:
 *              han - handle to identify resources
 */
void gru_unlock_async_resource(unsigned long han)
{
	int blade_id = ASYNC_HAN_TO_BID(han);

	gru_unlock_kernel_context(blade_id);
}
/*----------------------------------------------------------------------*/
int gru_get_cb_exception_detail(void *cb,
		struct control_block_extended_exc_detail *excdet)
{
	struct gru_control_block_extended *cbe;
	struct gru_blade_state *bs;
	int cbrnum;

	bs = KCB_TO_BS(cb);
	cbrnum = thread_cbr_number(bs->bs_kgts, get_cb_number(cb));
	cbe = get_cbe(GRUBASE(cb), cbrnum);
	gru_flush_cache(cbe);	/* CBE not coherent */
	excdet->opc = cbe->opccpy;
	excdet->exopc = cbe->exopccpy;
	excdet->ecause = cbe->ecause;
	excdet->exceptdet0 = cbe->idef1upd;
	excdet->exceptdet1 = cbe->idef3upd;
	gru_flush_cache(cbe);
	return 0;
}
char *gru_get_cb_exception_detail_str(int ret, void *cb,
				      char *buf, int size)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;

	if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
		gru_get_cb_exception_detail(cb, &excdet);
		snprintf(buf, size,
			"GRU:%d exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
			"excdet0 0x%lx, excdet1 0x%x", smp_processor_id(),
			gen, excdet.opc, excdet.exopc, excdet.ecause,
			excdet.exceptdet0, excdet.exceptdet1);
	} else {
		snprintf(buf, size, "No exception");
	}
	return buf;
}
static int gru_wait_idle_or_exception(struct gru_control_block_status *gen)
{
	while (gen->istatus >= CBS_ACTIVE) {
		cpu_relax();
		barrier();
	}
	return gen->istatus;
}
static int gru_retry_exception(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;
	int retry = EXCEPTION_RETRY_LIMIT;

	while (1) {
		if (gru_wait_idle_or_exception(gen) == CBS_IDLE)
			return CBS_IDLE;
		if (gru_get_cb_message_queue_substatus(cb))
			return CBS_EXCEPTION;
		gru_get_cb_exception_detail(cb, &excdet);
		if ((excdet.ecause & ~EXCEPTION_RETRY_BITS) ||
				(excdet.cbrexecstatus & CBR_EXS_ABORT_OCC))
			break;
		if (retry-- == 0)
			break;
		gen->icmd = 1;
		gru_flush_cache(gen);
	}
	return CBS_EXCEPTION;
}
int gru_check_status_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gen->istatus;
	if (ret == CBS_EXCEPTION)
		ret = gru_retry_exception(cb);
	rmb();
	return ret;
}
int gru_wait_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gru_wait_idle_or_exception(gen);
	if (ret == CBS_EXCEPTION)
		ret = gru_retry_exception(cb);
	rmb();
	return ret;
}
void gru_abort(int ret, void *cb, char *str)
{
	char buf[GRU_EXC_STR_SIZE];

	panic("GRU FATAL ERROR: %s - %s\n", str,
	      gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf)));
}

void gru_wait_abort_proc(void *cb)
{
	int ret;

	ret = gru_wait_proc(cb);
	if (ret)
		gru_abort(ret, cb, "gru_wait_abort");
}
/*------------------------------ MESSAGE QUEUES -----------------------------*/

/* Internal status. These are NOT returned to the user. */
#define MQIE_AGAIN		-1	/* try again */


/*
 * Save/restore the "present" flag that is in the second line of 2-line
 * messages
 */
static inline int get_present2(void *p)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	return mhdr->present;
}

static inline void restore_present2(void *p, int val)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	mhdr->present = val;
}
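
/*
 * In a 2-line message the first byte of the second cache line carries
 * payload, but it also doubles as an arrival marker: the sender saves
 * that byte in present2 of the header and overwrites it with MQS_FULL;
 * the receiver waits until both lines are marked present, then restores
 * the payload byte from present2 (see gru_send_message_gpa() and
 * gru_get_next_message() below).
 */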
/*
 * Create a message queue.
 *      qlines - message queue size in cache lines. Includes 2-line header.
 */
int gru_create_message_queue(struct gru_message_queue_desc *mqd,
		void *p, unsigned int bytes, int nasid, int vector, int apicid)
{
	struct message_queue *mq = p;
	unsigned int qlines;

	qlines = bytes / GRU_CACHE_LINE_BYTES - 2;
	memset(mq, 0, bytes);
	mq->start = &mq->data;
	mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
	mq->next = &mq->data;
	mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
	mq->qlines = qlines;
	mq->hstatus[0] = 0;
	mq->hstatus[1] = 1;
	mq->head = gru_mesq_head(2, qlines / 2 + 1);
	mqd->mq = mq;
	mqd->mq_gpa = uv_gpa(mq);
	mqd->qlines = qlines;
	mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid);
	mqd->interrupt_vector = vector;
	mqd->interrupt_apicid = apicid;
	return 0;
}
EXPORT_SYMBOL_GPL(gru_create_message_queue);
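
/*
 * Worked example: quicktest1() below passes bytes = 8 * GRU_CACHE_LINE_BYTES,
 * so qlines = 8 - 2 = 6. start2 then points 2 cache lines into the data
 * area and limit 4 cache lines in, which is why that test expects
 * MQE_QUEUE_FULL after exactly 4 one-line messages have been sent.
 */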
/*
 * Send a NOOP message to a message queue
 *      Returns:
 *               0 - if queue is full after the send. This is the normal case
 *                   but various races can change this.
 *              -1 - if mesq sent successfully but queue not full
 *              >0 - unexpected error. MQE_xxx returned
 */
static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg)
{
	const struct message_header noop_header = {
					.present = MQS_NOOP, .lines = 1};
	unsigned long m;
	int substatus, ret;
	struct message_header save_mhdr, *mhdr = mesg;

	STAT(mesq_noop);
	save_mhdr = *mhdr;
	*mhdr = noop_header;
	gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), 1, IMA);
	ret = gru_wait(cb);

	if (ret) {
		substatus = gru_get_cb_message_queue_substatus(cb);
		switch (substatus) {
		case CBSS_NO_ERROR:
			STAT(mesq_noop_unexpected_error);
			ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_LB_OVERFLOWED:
			STAT(mesq_noop_lb_overflow);
			ret = MQE_CONGESTION;
			break;
		case CBSS_QLIMIT_REACHED:
			STAT(mesq_noop_qlimit_reached);
			ret = 0;
			break;
		case CBSS_AMO_NACKED:
			STAT(mesq_noop_amo_nacked);
			ret = MQE_CONGESTION;
			break;
		case CBSS_PUT_NACKED:
			STAT(mesq_noop_put_nacked);
			m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
			gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
						IMA);
			if (gru_wait(cb) == CBS_IDLE)
				ret = MQIE_AGAIN;
			else
				ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_PAGE_OVERFLOW:
			STAT(mesq_noop_page_overflow);
			/* fall through */
		default:
			BUG();
		}
	}
	*mhdr = save_mhdr;
	return ret;
}
/*
 * Handle a gru_mesq full.
 */
static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	union gru_mesqhead mqh;
	unsigned int limit, head;
	unsigned long avalue;
	int half, qlines;

	/* Determine if switching to first/second half of q */
	avalue = gru_get_amo_value(cb);
	head = gru_get_amo_value_head(cb);
	limit = gru_get_amo_value_limit(cb);

	qlines = mqd->qlines;
	half = (limit != qlines);

	if (half)
		mqh = gru_mesq_head(qlines / 2 + 1, qlines);
	else
		mqh = gru_mesq_head(2, qlines / 2 + 1);

	/* Try to get lock for switching head pointer */
	gru_gamir(cb, EOP_IR_CLR, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;
	if (!gru_get_amo_value(cb)) {
		STAT(mesq_qf_locked);
		return MQE_QUEUE_FULL;
	}

	/* Got the lock. Send optional NOP if queue not full, */
	if (head != limit) {
		if (send_noop_message(cb, mqd, mesg)) {
			gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half),
					XTYPE_DW, IMA);
			if (gru_wait(cb) != CBS_IDLE)
				goto cberr;
			STAT(mesq_qf_noop_not_full);
			return MQIE_AGAIN;
		}
		avalue++;
	}

	/* Then flip queuehead to other half of queue. */
	gru_gamer(cb, EOP_ERR_CSWAP, mqd->mq_gpa, XTYPE_DW, mqh.val, avalue,
							IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;

	/* If the queue head swap was not successful, clear the hstatus lock */
	if (gru_get_amo_value(cb) != avalue) {
		STAT(mesq_qf_switch_head_failed);
		gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), XTYPE_DW,
							IMA);
		if (gru_wait(cb) != CBS_IDLE)
			goto cberr;
	}
	return MQIE_AGAIN;
cberr:
	STAT(mesq_qf_unexpected_error);
	return MQE_UNEXPECTED_CB_ERR;
}
/*
 * Send a cross-partition interrupt to the SSI that contains the target
 * message queue. Normally, the interrupt is automatically delivered by
 * hardware but some error conditions require explicit delivery.
 */
static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
{
	if (mqd->interrupt_vector)
		uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid,
				mqd->interrupt_vector);
}
/*
 * Handle a PUT failure. Note: if message was a 2-line message, one of the
 * lines might have successfully been written. Before sending the
 * message, "present" must be cleared in BOTH lines to prevent the receiver
 * from prematurely seeing the full message.
 */
static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	unsigned long m;

	m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
	if (lines == 2) {
		gru_vset(cb, m, 0, XTYPE_CL, lines, 1, IMA);
		if (gru_wait(cb) != CBS_IDLE)
			return MQE_UNEXPECTED_CB_ERR;
	}
	gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		return MQE_UNEXPECTED_CB_ERR;
	send_message_queue_interrupt(mqd);
	return MQE_OK;
}
/*
 * Handle a gru_mesq failure. Some of these failures are software recoverable
 * or retryable.
 */
static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	int substatus, ret = 0;

	substatus = gru_get_cb_message_queue_substatus(cb);
	switch (substatus) {
	case CBSS_NO_ERROR:
		STAT(mesq_send_unexpected_error);
		ret = MQE_UNEXPECTED_CB_ERR;
		break;
	case CBSS_LB_OVERFLOWED:
		STAT(mesq_send_lb_overflow);
		ret = MQE_CONGESTION;
		break;
	case CBSS_QLIMIT_REACHED:
		STAT(mesq_send_qlimit_reached);
		ret = send_message_queue_full(cb, mqd, mesg, lines);
		break;
	case CBSS_AMO_NACKED:
		STAT(mesq_send_amo_nacked);
		ret = MQE_CONGESTION;
		break;
	case CBSS_PUT_NACKED:
		STAT(mesq_send_put_nacked);
		ret = send_message_put_nacked(cb, mqd, mesg, lines);
		break;
	case CBSS_PAGE_OVERFLOW:
		STAT(mesq_page_overflow);
		/* fall through */
	default:
		BUG();
	}
	return ret;
}
/*
 * Send a message to a message queue
 *      mqd     message queue descriptor
 *      mesg    message. Must be a vaddr within a GSEG
 *      bytes   message size (<= 2 CL)
 */
int gru_send_message_gpa(struct gru_message_queue_desc *mqd, void *mesg,
				unsigned int bytes)
{
	struct message_header *mhdr;
	void *cb;
	void *dsr;
	int istatus, clines, ret;

	STAT(mesq_send);
	BUG_ON(bytes < sizeof(int) || bytes > 2 * GRU_CACHE_LINE_BYTES);

	clines = DIV_ROUND_UP(bytes, GRU_CACHE_LINE_BYTES);
	if (gru_get_cpu_resources(bytes, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	memcpy(dsr, mesg, bytes);
	mhdr = dsr;
	mhdr->present = MQS_FULL;
	mhdr->lines = clines;
	if (clines == 2) {
		mhdr->present2 = get_present2(mhdr);
		restore_present2(mhdr, MQS_FULL);
	}

	do {
		ret = MQE_OK;
		gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), clines, IMA);
		istatus = gru_wait(cb);
		if (istatus != CBS_IDLE)
			ret = send_message_failure(cb, mqd, dsr, clines);
	} while (ret == MQIE_AGAIN);
	gru_free_cpu_resources(cb, dsr);

	if (ret)
		STAT(mesq_send_failed);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_send_message_gpa);
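
/*
 * Illustrative sketch, not part of the driver: senders typically retry on
 * congestion, exactly as quicktest1() below does:
 *
 *      do {
 *              ret = gru_send_message_gpa(&mqd, mesg, bytes);
 *      } while (ret == MQE_CONGESTION);
 */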
/*
 * Advance the receive pointer for the queue to the next message.
 */
void gru_free_message(struct gru_message_queue_desc *mqd, void *mesg)
{
	struct message_queue *mq = mqd->mq;
	struct message_header *mhdr = mq->next;
	void *next, *pnext;
	int half = -1;
	int lines = mhdr->lines;

	if (lines == 2)
		restore_present2(mhdr, MQS_EMPTY);
	mhdr->present = MQS_EMPTY;

	pnext = mq->next;
	next = pnext + GRU_CACHE_LINE_BYTES * lines;
	if (next == mq->limit) {
		next = mq->start;
		half = 1;
	} else if (pnext < mq->start2 && next >= mq->start2) {
		half = 0;
	}

	if (half >= 0)
		mq->hstatus[half] = 1;
	mq->next = next;
}
EXPORT_SYMBOL_GPL(gru_free_message);
/*
 * Get next message from message queue. Return NULL if no message
 * present. User must call next_message() to move to next message.
 *      rmsg - return pointer to message
 */
void *gru_get_next_message(struct gru_message_queue_desc *mqd)
{
	struct message_queue *mq = mqd->mq;
	struct message_header *mhdr = mq->next;
	int present = mhdr->present;

	/* skip NOOP messages */
	while (present == MQS_NOOP) {
		gru_free_message(mqd, mhdr);
		mhdr = mq->next;
		present = mhdr->present;
	}

	/* Wait for both halves of 2 line messages */
	if (present == MQS_FULL && mhdr->lines == 2 &&
				get_present2(mhdr) == MQS_EMPTY)
		present = MQS_EMPTY;

	if (!present) {
		STAT(mesq_receive_none);
		return NULL;
	}

	if (mhdr->lines == 2)
		restore_present2(mhdr, mhdr->present2);

	STAT(mesq_receive);
	return mhdr;
}
EXPORT_SYMBOL_GPL(gru_get_next_message);
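
/*
 * Illustrative sketch, not part of the driver: a receiver drains the queue
 * by alternating the two calls above, as quicktest1() below does:
 *
 *      while ((m = gru_get_next_message(&mqd)) != NULL) {
 *              ... consume message m ...
 *              gru_free_message(&mqd, m);
 *      }
 */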
/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/

/*
 * Load a DW from a global GPA. The GPA can be a memory or MMR address.
 */
int gru_read_gpa(unsigned long *value, unsigned long gpa)
{
	void *cb;
	void *dsr;
	int ret, iaa;

	STAT(read_gpa);
	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	iaa = gpa >> 62;
	gru_vload_phys(cb, gpa, gru_get_tri(dsr), iaa, IMA);
	ret = gru_wait(cb);
	if (ret == CBS_IDLE)
		*value = *(unsigned long *)dsr;
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_read_gpa);
/*
 * Copy a block of data using the GRU resources
 */
int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
				unsigned int bytes)
{
	void *cb;
	void *dsr;
	int ret;

	STAT(copy_gpa);
	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
		  XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_CL, IMA);
	ret = gru_wait(cb);
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_copy_gpa);
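
/*
 * Note: both source and destination are global physical addresses (GPAs),
 * which may reference memory outside the local blade. quicktest3() below
 * exercises this with two local buffers converted via uv_gpa().
 */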
/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
/* Temp - will delete after we gain confidence in the GRU */

static int quicktest0(unsigned long arg)
{
	unsigned long word0;
	unsigned long word1;
	void *cb;
	void *dsr;
	unsigned long *p;
	int ret = -EIO;

	if (gru_get_cpu_resources(GRU_CACHE_LINE_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	p = dsr;
	word0 = MAGIC;
	word1 = 0;

	gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE) {
		printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 1\n", smp_processor_id());
		goto done;
	}

	if (*p != MAGIC) {
		printk(KERN_DEBUG "GRU:%d quicktest0 bad magic 0x%lx\n", smp_processor_id(), *p);
		goto done;
	}
	gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE) {
		printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 2\n", smp_processor_id());
		goto done;
	}

	if (word0 != word1 || word1 != MAGIC) {
		printk(KERN_DEBUG
		       "GRU:%d quicktest0 err: found 0x%lx, expected 0x%lx\n",
		       smp_processor_id(), word1, MAGIC);
		goto done;
	}
	ret = 0;

done:
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
#define ALIGNUP(p, q)	((void *)(((unsigned long)(p) + (q) - 1) & ~(q - 1)))
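/* e.g. ALIGNUP(0x1234, 1024) == (void *)0x1400; q must be a power of 2 */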
static int quicktest1(unsigned long arg)
{
	struct gru_message_queue_desc mqd;
	void *p, *mq;
	unsigned long *dw;
	int i, ret = -EIO;
	char mes[GRU_CACHE_LINE_BYTES], *m;

	/* Need 1K cacheline aligned that does not cross page boundary */
	p = kmalloc(4096, 0);
	if (p == NULL)
		return -ENOMEM;
	mq = ALIGNUP(p, 1024);
	memset(mes, 0xee, sizeof(mes));
	dw = mq;

	gru_create_message_queue(&mqd, mq, 8 * GRU_CACHE_LINE_BYTES, 0, 0, 0);
	for (i = 0; i < 6; i++) {
		mes[8] = i;
		do {
			ret = gru_send_message_gpa(&mqd, mes, sizeof(mes));
		} while (ret == MQE_CONGESTION);
		if (ret)
			break;
	}
	if (ret != MQE_QUEUE_FULL || i != 4) {
		printk(KERN_DEBUG "GRU:%d quicktest1: unexpected status %d, i %d\n",
		       smp_processor_id(), ret, i);
		goto done;
	}

	for (i = 0; i < 6; i++) {
		m = gru_get_next_message(&mqd);
		if (!m || m[8] != i)
			break;
		gru_free_message(&mqd, m);
	}
	if (i != 4) {
		printk(KERN_DEBUG "GRU:%d quicktest1: bad message, i %d, m %p, m8 %d\n",
			smp_processor_id(), i, m, m ? m[8] : -1);
		goto done;
	}
	ret = 0;

done:
	kfree(p);
	return ret;
}
static int quicktest2(unsigned long arg)
{
	static DECLARE_COMPLETION(cmp);
	unsigned long han;
	int blade_id = 0;
	int numcb = 4;
	int ret = 0;
	unsigned long *buf;
	void *cb0, *cb;
	struct gru_control_block_status *gen;
	int i, k, istatus, bytes;

	bytes = numcb * 4 * 8;
	buf = kmalloc(bytes, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = -EBUSY;
	han = gru_reserve_async_resources(blade_id, numcb, 0, &cmp);
	if (!han)
		goto done;

	gru_lock_async_resource(han, &cb0, NULL);
	memset(buf, 0xee, bytes);
	for (i = 0; i < numcb; i++)
		gru_vset(cb0 + i * GRU_HANDLE_STRIDE, uv_gpa(&buf[i * 4]), 0,
				XTYPE_DW, 4, 1, IMA_INTERRUPT);

	ret = 0;
	k = numcb;
	do {
		gru_wait_async_cbr(han);
		for (i = 0; i < numcb; i++) {
			cb = cb0 + i * GRU_HANDLE_STRIDE;
			istatus = gru_check_status(cb);
			if (istatus != CBS_ACTIVE && istatus != CBS_CALL_OS)
				break;
		}
		if (i == numcb)
			continue;
		if (istatus != CBS_IDLE) {
			printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, exception\n", smp_processor_id(), i);
			ret = -EFAULT;
		} else if (buf[4 * i] || buf[4 * i + 1] || buf[4 * i + 2] ||
				buf[4 * i + 3]) {
			printk(KERN_DEBUG "GRU:%d quicktest2:cb %d,  buf 0x%lx, 0x%lx, 0x%lx, 0x%lx\n",
			       smp_processor_id(), i, buf[4 * i], buf[4 * i + 1], buf[4 * i + 2], buf[4 * i + 3]);
			ret = -EIO;
		}
		k--;
		gen = cb;
		gen->istatus = CBS_CALL_OS; /* don't handle this CBR again */
	} while (k);
	BUG_ON(cmp.done);

	gru_unlock_async_resource(han);
	gru_release_async_resources(han);
done:
	kfree(buf);
	return ret;
}
#define BUFSIZE 200
static int quicktest3(unsigned long arg)
{
	char buf1[BUFSIZE], buf2[BUFSIZE];
	int ret = 0;

	memset(buf2, 0, sizeof(buf2));
	memset(buf1, get_cycles() & 255, sizeof(buf1));
	gru_copy_gpa(uv_gpa(buf2), uv_gpa(buf1), BUFSIZE);
	if (memcmp(buf1, buf2, BUFSIZE)) {
		printk(KERN_DEBUG "GRU:%d quicktest3 error\n", smp_processor_id());
		ret = -EIO;
	}
	return ret;
}
/*
 * Debugging only. User hook for various kernel tests
 * of driver & gru.
 */
int gru_ktest(unsigned long arg)
{
	int ret = -EINVAL;

	switch (arg & 0xff) {
	case 0:
		ret = quicktest0(arg);
		break;
	case 1:
		ret = quicktest1(arg);
		break;
	case 2:
		ret = quicktest2(arg);
		break;
	case 3:
		ret = quicktest3(arg);
		break;
	case 99:
		ret = gru_free_kernel_contexts();
		break;
	}
	return ret;
}

int gru_kservices_init(void)
{
	return 0;
}

void gru_kservices_exit(void)
{
	if (gru_free_kernel_contexts())
		BUG();
}