/*
 * SN Platform GRU Driver
 *
 *              KERNEL SERVICES THAT USE THE GRU
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include "grutables.h"
#include "grukservices.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>
/*
 * The following is an interim algorithm for management of kernel GRU
 * resources. This will likely be replaced when we better understand the
 * kernel/user requirements.
 *
 * Blade percpu resources reserved for kernel use. These resources are
 * reserved whenever the kernel context for the blade is loaded. Note
 * that the kernel context is not guaranteed to be always available. It is
 * loaded on demand & can be stolen by a user if the user demand exceeds the
 * kernel demand. The kernel can always reload the kernel context, but
 * a SLEEP may be required!
 *
 * Async Overview:
 *
 *	Each blade has one "kernel context" that owns GRU kernel resources
 *	located on the blade. Kernel drivers use GRU resources in this context
 *	for sending messages, zeroing memory, etc.
 *
 *	The kernel context is dynamically loaded on demand. If it is not in
 *	use by the kernel, the kernel context can be unloaded & given to a user.
 *	The kernel context will be reloaded when needed. This may require that
 *	a context be stolen from a user.
 *		NOTE: frequent unloading/reloading of the kernel context is
 *		expensive. We are depending on batch schedulers, cpusets, sane
 *		drivers or some other mechanism to prevent the need for frequent
 *		stealing/reloading.
 *
 *	The kernel context consists of two parts:
 *		- 1 CB & a few DSRs that are reserved for each cpu on the blade.
 *		  Each cpu has its own private resources & does not share them
 *		  with other cpus. These resources are used serially, i.e.,
 *		  locked, used & unlocked on each call to a function in
 *		  grukservices.
 *			(Now that we have dynamic loading of kernel contexts, I
 *			may rethink this & allow sharing between cpus....)
 *
 *		- Additional resources can be reserved long term & used directly
 *		  by UV drivers located in the kernel. Drivers using these GRU
 *		  resources can use asynchronous GRU instructions that send
 *		  interrupts on completion.
 *			- these resources must be explicitly locked/unlocked
 *			- locked resources prevent (obviously) the kernel
 *			  context from being unloaded.
 *			- drivers using these resources directly issue their own
 *			  GRU instructions and must wait/check completion.
 *
 *		  When these resources are reserved, the caller can optionally
 *		  associate a wait_queue with the resources and use asynchronous
 *		  GRU instructions. When an async GRU instruction completes, the
 *		  driver will do a wakeup on the event.
 */
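
/*
 * Illustrative sketch (not part of the driver): a kernel client using the
 * async reservation API described above might follow this lifecycle. The
 * blade id, CBR count, and completion variable below are hypothetical.
 *
 *	static DECLARE_COMPLETION(my_cmp);
 *	unsigned long han;
 *	void *cb;
 *
 *	han = gru_reserve_async_resources(blade_id, 1, 0, &my_cmp);
 *	if (!han)
 *		return -EBUSY;		(blade already has a reservation)
 *	gru_lock_async_resource(han, &cb, NULL);
 *	... issue GRU instructions on cb using IMA_INTERRUPT ...
 *	gru_wait_async_cbr(han);	(sleeps until my_cmp is completed)
 *	gru_unlock_async_resource(han);
 *	gru_release_async_resources(han);
 */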
#define ASYNC_HAN_TO_BID(h)	((h) - 1)
#define ASYNC_BID_TO_HAN(b)	((b) + 1)
#define ASYNC_HAN_TO_BS(h)	gru_base[ASYNC_HAN_TO_BID(h)]
#define KCB_TO_GID(cb)		((cb - gru_start_vaddr) /		\
					(GRU_SIZE * GRU_CHIPLETS_PER_BLADE))
#define KCB_TO_BS(cb)		gru_base[KCB_TO_GID(cb)]

#define GRU_NUM_KERNEL_CBR	1
#define GRU_NUM_KERNEL_DSR_BYTES 256
#define GRU_NUM_KERNEL_DSR_CL	(GRU_NUM_KERNEL_DSR_BYTES /		\
					GRU_CACHE_LINE_BYTES)

/* GRU instruction attributes for all instructions */
#define IMA			IMA_CB_DELAY

/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */
#define __gru_cacheline_aligned__				\
	__attribute__((__aligned__(GRU_CACHE_LINE_BYTES)))

#define MAGIC	0x1234567887654321UL

/* Default retry count for GRU errors on kernel instructions */
#define EXCEPTION_RETRY_LIMIT	3
/* Status of message queue sections */
#define MQS_EMPTY		0
#define MQS_FULL		1
#define MQS_NOOP		2

/*----------------- RESOURCE MANAGEMENT -------------------------------------*/
/* optimized for x86_64 */
struct message_queue {
	union gru_mesqhead	head __gru_cacheline_aligned__;	/* CL 0 */
	int			qlines;				/* DW 1 */
	long			hstatus[2];
	void			*next __gru_cacheline_aligned__;/* CL 1 */
	void			*limit;
	void			*start;
	void			*start2;
	char			data ____cacheline_aligned;	/* CL 2 */
};

/* First word in every message - used by mesq interface */
struct message_header {
	char	present;
	char	present2;
	char	lines;
	char	fill;
};

#define HSTATUS(mq, h)	((mq) + offsetof(struct message_queue, hstatus[h]))
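
/*
 * Worked layout example (illustrative): for a queue created with
 * bytes = 8 * GRU_CACHE_LINE_BYTES, gru_create_message_queue() below
 * computes qlines = 8 - 2 = 6. start then points at the first data line,
 * start2 at data + (6/2 - 1) = 2 cache lines (beginning of the second
 * half), and limit at data + (6 - 2) = 4 cache lines. The two hstatus
 * words serialize switching between the halves; HSTATUS() yields their
 * offsets for the GAMIR/GAMER operations in send_message_queue_full().
 */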
/*
 * Reload the blade's kernel context into a GRU chiplet. Called holding
 * the bs_kgts_sema for READ. Will steal user contexts if necessary.
 */
static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
{
	struct gru_state *gru;
	struct gru_thread_state *kgts;
	void *vaddr;
	int ctxnum, ncpus;

	up_read(&bs->bs_kgts_sema);
	down_write(&bs->bs_kgts_sema);

	if (!bs->bs_kgts)
		bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0);
	kgts = bs->bs_kgts;

	if (!kgts->ts_gru) {
		STAT(load_kernel_context);
		ncpus = uv_blade_nr_possible_cpus(blade_id);
		kgts->ts_cbr_au_count = GRU_CB_COUNT_TO_AU(
			GRU_NUM_KERNEL_CBR * ncpus + bs->bs_async_cbrs);
		kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
			GRU_NUM_KERNEL_DSR_BYTES * ncpus +
				bs->bs_async_dsr_bytes);
		while (!gru_assign_gru_context(kgts, blade_id)) {
			msleep(1);
			gru_steal_context(kgts, blade_id);
		}
		gru_load_context(kgts);
		gru = bs->bs_kgts->ts_gru;
		vaddr = gru->gs_gru_base_vaddr;
		ctxnum = kgts->ts_ctxnum;
		bs->kernel_cb = get_gseg_base_address_cb(vaddr, ctxnum, 0);
		bs->kernel_dsr = get_gseg_base_address_ds(vaddr, ctxnum, 0);
	}
	downgrade_write(&bs->bs_kgts_sema);
}
/*
 * Free all kernel contexts that are not currently in use.
 *   Returns 0 if all freed, else number of in-use contexts.
 */
static int gru_free_kernel_contexts(void)
{
	struct gru_blade_state *bs;
	struct gru_thread_state *kgts;
	int bid, ret = 0;

	for (bid = 0; bid < GRU_MAX_BLADES; bid++) {
		bs = gru_base[bid];
		if (!bs)
			continue;
		if (down_write_trylock(&bs->bs_kgts_sema)) {
			kgts = bs->bs_kgts;
			if (kgts && kgts->ts_gru)
				gru_unload_context(kgts, 0);
			kfree(kgts);
			bs->bs_kgts = NULL;
			up_write(&bs->bs_kgts_sema);
		} else {
			ret++;
		}
	}
	return ret;
}
/*
 * Lock & load the kernel context for the specified blade.
 */
static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
{
	struct gru_blade_state *bs;

	STAT(lock_kernel_context);
	bs = gru_base[blade_id];

	down_read(&bs->bs_kgts_sema);
	if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
		gru_load_kernel_context(bs, blade_id);
	return bs;
}
/*
 * Unlock the kernel context for the specified blade. Context is not
 * unloaded but may be stolen before next use.
 */
static void gru_unlock_kernel_context(int blade_id)
{
	struct gru_blade_state *bs;

	bs = gru_base[blade_id];
	up_read(&bs->bs_kgts_sema);
	STAT(unlock_kernel_context);
}
/*
 * Reserve & get pointers to the DSR/CBRs reserved for the current cpu.
 *	- returns with preemption disabled
 */
static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
{
	struct gru_blade_state *bs;
	int lcpu;

	BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
	preempt_disable();
	bs = gru_lock_kernel_context(uv_numa_blade_id());
	lcpu = uv_blade_processor_id();
	*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
	*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
	return 0;
}
/*
 * Free the current cpu's reserved DSR/CBR resources.
 */
static void gru_free_cpu_resources(void *cb, void *dsr)
{
	gru_unlock_kernel_context(uv_numa_blade_id());
	preempt_enable();
}
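
/*
 * Illustrative usage (not part of the driver): the per-cpu kernel
 * resources are always used serially, bracketed by get/free:
 *
 *	void *cb, *dsr;
 *
 *	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
 *		return MQE_BUG_NO_RESOURCES;
 *	... build operands in dsr, issue an instruction on cb, gru_wait(cb) ...
 *	gru_free_cpu_resources(cb, dsr);
 *
 * gru_send_message_gpa() and gru_copy_gpa() below follow exactly this
 * pattern.
 */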
/*
 * Reserve GRU resources to be used asynchronously.
 *   Note: currently supports only 1 reservation per blade.
 *
 *	input:
 *		blade_id  - blade on which resources should be reserved
 *		cbrs	  - number of CBRs
 *		dsr_bytes - number of DSR bytes needed
 *	output:
 *		handle to identify resource
 *		(0 = async resources already reserved)
 */
unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes,
			struct completion *cmp)
{
	struct gru_blade_state *bs;
	struct gru_thread_state *kgts;
	int ret = 0;

	bs = gru_base[blade_id];

	down_write(&bs->bs_kgts_sema);

	/* Verify no resources already reserved */
	if (bs->bs_async_dsr_bytes + bs->bs_async_cbrs)
		goto done;
	bs->bs_async_dsr_bytes = dsr_bytes;
	bs->bs_async_cbrs = cbrs;
	bs->bs_async_wq = cmp;
	kgts = bs->bs_kgts;

	/* Resources changed. Unload context if already loaded */
	if (kgts && kgts->ts_gru)
		gru_unload_context(kgts, 0);
	ret = ASYNC_BID_TO_HAN(blade_id);

done:
	up_write(&bs->bs_kgts_sema);
	return ret;
}
/*
 * Release async resources previously reserved.
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_release_async_resources(unsigned long han)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);

	down_write(&bs->bs_kgts_sema);
	bs->bs_async_dsr_bytes = 0;
	bs->bs_async_cbrs = 0;
	bs->bs_async_wq = NULL;
	up_write(&bs->bs_kgts_sema);
}
/*
 * Wait for async GRU instructions to complete.
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_wait_async_cbr(unsigned long han)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);

	wait_for_completion(bs->bs_async_wq);
	mb();
}
/*
 * Lock previously reserved async GRU resources.
 *
 *	input:
 *		han - handle to identify resources
 *	output:
 *		cb  - pointer to first CBR
 *		dsr - pointer to first DSR
 */
void gru_lock_async_resource(unsigned long han, void **cb, void **dsr)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
	int blade_id = ASYNC_HAN_TO_BID(han);
	int ncpus;

	gru_lock_kernel_context(blade_id);
	ncpus = uv_blade_nr_possible_cpus(blade_id);
	if (cb)
		*cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE;
	if (dsr)
		*dsr = bs->kernel_dsr + ncpus * GRU_NUM_KERNEL_DSR_BYTES;
}
/*
 * Unlock previously reserved async GRU resources.
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_unlock_async_resource(unsigned long han)
{
	int blade_id = ASYNC_HAN_TO_BID(han);

	gru_unlock_kernel_context(blade_id);
}
/*----------------------------------------------------------------------*/
int gru_get_cb_exception_detail(void *cb,
		struct control_block_extended_exc_detail *excdet)
{
	struct gru_control_block_extended *cbe;
	struct gru_blade_state *bs;
	int cbrnum;

	bs = KCB_TO_BS(cb);
	cbrnum = thread_cbr_number(bs->bs_kgts, get_cb_number(cb));
	cbe = get_cbe(GRUBASE(cb), cbrnum);
	gru_flush_cache(cbe);	/* CBE not coherent */
	excdet->opc = cbe->opccpy;
	excdet->exopc = cbe->exopccpy;
	excdet->ecause = cbe->ecause;
	excdet->exceptdet0 = cbe->idef1upd;
	excdet->exceptdet1 = cbe->idef3upd;
	gru_flush_cache(cbe);
	return 0;
}
char *gru_get_cb_exception_detail_str(int ret, void *cb,
				      char *buf, int size)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;

	if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
		gru_get_cb_exception_detail(cb, &excdet);
		snprintf(buf, size,
			"GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x, "
			"excdet0 0x%lx, excdet1 0x%x",
			gen, excdet.opc, excdet.exopc, excdet.ecause,
			excdet.exceptdet0, excdet.exceptdet1);
	} else {
		snprintf(buf, size, "No exception");
	}
	return buf;
}
static int gru_wait_idle_or_exception(struct gru_control_block_status *gen)
{
	while (gen->istatus >= CBS_ACTIVE) {
		cpu_relax();
		barrier();
	}
	return gen->istatus;
}
static int gru_retry_exception(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;
	int retry = EXCEPTION_RETRY_LIMIT;

	while (1) {
		if (gru_wait_idle_or_exception(gen) == CBS_IDLE)
			return CBS_IDLE;
		if (gru_get_cb_message_queue_substatus(cb))
			return CBS_EXCEPTION;
		gru_get_cb_exception_detail(cb, &excdet);
		if ((excdet.ecause & ~EXCEPTION_RETRY_BITS) ||
				(excdet.cbrexecstatus & CBR_EXS_ABORT_OCC))
			break;
		if (retry-- == 0)
			break;
		gen->icmd = 1;
		gru_flush_cache(gen);
	}
	return CBS_EXCEPTION;
}
int gru_check_status_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gen->istatus;
	if (ret != CBS_EXCEPTION)
		return ret;
	return gru_retry_exception(cb);
}
int gru_wait_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gru_wait_idle_or_exception(gen);
	if (ret == CBS_EXCEPTION)
		ret = gru_retry_exception(cb);

	return ret;
}
void gru_abort(int ret, void *cb, char *str)
{
	char buf[GRU_EXC_STR_SIZE];

	panic("GRU FATAL ERROR: %s - %s\n", str,
	      gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf)));
}

void gru_wait_abort_proc(void *cb)
{
	int ret;

	ret = gru_wait_proc(cb);
	if (ret)
		gru_abort(ret, cb, "gru_wait_abort");
}
/*------------------------------ MESSAGE QUEUES -----------------------------*/

/* Internal status. These are NOT returned to the user. */
#define MQIE_AGAIN		-1	/* try again */

/*
 * Save/restore the "present" flag that is in the second line of 2-line
 * messages.
 */
static inline int get_present2(void *p)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	return mhdr->present;
}

static inline void restore_present2(void *p, int val)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	mhdr->present = val;
}
/*
 * Create a message queue.
 *	qlines - message queue size in cache lines. Includes 2-line header.
 */
int gru_create_message_queue(struct gru_message_queue_desc *mqd,
		void *p, unsigned int bytes, int nasid, int vector, int apicid)
{
	struct message_queue *mq = p;
	unsigned int qlines;

	qlines = bytes / GRU_CACHE_LINE_BYTES - 2;
	memset(mq, 0, bytes);
	mq->start = &mq->data;
	mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
	mq->next = &mq->data;
	mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
	mq->qlines = qlines;
	mq->hstatus[0] = 0;
	mq->hstatus[1] = 1;
	mq->head = gru_mesq_head(2, qlines / 2 + 1);
	mqd->mq = mq;
	mqd->mq_gpa = uv_gpa(mq);
	mqd->qlines = qlines;
	mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid);
	mqd->interrupt_vector = vector;
	mqd->interrupt_apicid = apicid;
	return 0;
}
EXPORT_SYMBOL_GPL(gru_create_message_queue);
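
/*
 * Illustrative sketch (not part of the driver): creating a small queue in
 * kernel memory. The buffer must be cacheline aligned and must not cross a
 * page boundary, so over-allocate and align, as quicktest1() does below.
 * The zero nasid/vector/apicid disable interrupt delivery.
 *
 *	struct gru_message_queue_desc mqd;
 *	void *p, *mq;
 *
 *	p = kmalloc(4096, GFP_KERNEL);
 *	if (!p)
 *		return -ENOMEM;
 *	mq = ALIGNUP(p, 1024);
 *	gru_create_message_queue(&mqd, mq, 8 * GRU_CACHE_LINE_BYTES, 0, 0, 0);
 */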
/*
 * Send a NOOP message to a message queue.
 *	Returns:
 *		 0 - if queue is full after the send. This is the normal case
 *		     but various races can change this.
 *		-1 - if mesq sent successfully but queue not full
 *		>0 - unexpected error. MQE_xxx returned
 */
static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg)
{
	const struct message_header noop_header = {
					.present = MQS_NOOP, .lines = 1};
	unsigned long m;
	int substatus, ret;
	struct message_header save_mhdr, *mhdr = mesg;

	STAT(mesq_noop);
	save_mhdr = *mhdr;
	*mhdr = noop_header;
	gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), 1, IMA);
	ret = gru_wait(cb);

	if (ret) {
		substatus = gru_get_cb_message_queue_substatus(cb);
		switch (substatus) {
		case CBSS_NO_ERROR:
			STAT(mesq_noop_unexpected_error);
			ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_LB_OVERFLOWED:
			STAT(mesq_noop_lb_overflow);
			ret = MQE_CONGESTION;
			break;
		case CBSS_QLIMIT_REACHED:
			STAT(mesq_noop_qlimit_reached);
			ret = 0;
			break;
		case CBSS_AMO_NACKED:
			STAT(mesq_noop_amo_nacked);
			ret = MQE_CONGESTION;
			break;
		case CBSS_PUT_NACKED:
			STAT(mesq_noop_put_nacked);
			m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
			gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
						IMA);
			if (gru_wait(cb) == CBS_IDLE)
				ret = MQIE_AGAIN;
			else
				ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_PAGE_OVERFLOW:
		default:
			BUG();
		}
	}
	*mhdr = save_mhdr;
	return ret;
}
/*
 * Handle a gru_mesq full.
 */
static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	union gru_mesqhead mqh;
	unsigned int limit, head;
	unsigned long avalue;
	int half, qlines;

	/* Determine if switching to first/second half of q */
	avalue = gru_get_amo_value(cb);
	head = gru_get_amo_value_head(cb);
	limit = gru_get_amo_value_limit(cb);

	qlines = mqd->qlines;
	half = (limit != qlines);

	if (half)
		mqh = gru_mesq_head(qlines / 2 + 1, qlines);
	else
		mqh = gru_mesq_head(2, qlines / 2 + 1);

	/* Try to get lock for switching head pointer */
	gru_gamir(cb, EOP_IR_CLR, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;
	if (!gru_get_amo_value(cb)) {
		STAT(mesq_qf_locked);
		return MQE_QUEUE_FULL;		/* all done */
	}

	/* Got the lock. Send optional NOP if queue not full, */
	if (head != limit) {
		if (send_noop_message(cb, mqd, mesg)) {
			gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half),
					XTYPE_DW, IMA);
			if (gru_wait(cb) != CBS_IDLE)
				goto cberr;
			STAT(mesq_qf_noop_not_full);
			return MQIE_AGAIN;
		}
		avalue++;
	}

	/* Then flip queuehead to other half of queue. */
	gru_gamer(cb, EOP_ERR_CSWAP, mqd->mq_gpa, XTYPE_DW, mqh.val, avalue,
							IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;

	/* If the queue head swap failed, clear the hstatus lock */
	if (gru_get_amo_value(cb) != avalue) {
		STAT(mesq_qf_switch_head_failed);
		gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), XTYPE_DW,
				IMA);
		if (gru_wait(cb) != CBS_IDLE)
			goto cberr;
	}
	return MQIE_AGAIN;
cberr:
	STAT(mesq_qf_unexpected_error);
	return MQE_UNEXPECTED_CB_ERR;
}
/*
 * Send a cross-partition interrupt to the SSI that contains the target
 * message queue. Normally, the interrupt is automatically delivered by
 * hardware but some error conditions require explicit delivery.
 */
static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
{
	if (mqd->interrupt_vector)
		uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid,
				mqd->interrupt_vector);
}
/*
 * Handle a PUT failure. Note: if the message was a 2-line message, one of
 * the lines might have been written successfully. Before sending the
 * message, "present" must be cleared in BOTH lines to prevent the receiver
 * from prematurely seeing the full message.
 */
static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	unsigned long m;

	m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
	if (lines == 2) {
		gru_vset(cb, m, 0, XTYPE_CL, lines, 1, IMA);
		if (gru_wait(cb) != CBS_IDLE)
			return MQE_UNEXPECTED_CB_ERR;
	}
	gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		return MQE_UNEXPECTED_CB_ERR;
	send_message_queue_interrupt(mqd);
	return MQE_OK;
}
/*
 * Handle a gru_mesq failure. Some of these failures are software recoverable
 * or retryable.
 */
static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	int substatus, ret = 0;

	substatus = gru_get_cb_message_queue_substatus(cb);
	switch (substatus) {
	case CBSS_NO_ERROR:
		STAT(mesq_send_unexpected_error);
		ret = MQE_UNEXPECTED_CB_ERR;
		break;
	case CBSS_LB_OVERFLOWED:
		STAT(mesq_send_lb_overflow);
		ret = MQE_CONGESTION;
		break;
	case CBSS_QLIMIT_REACHED:
		STAT(mesq_send_qlimit_reached);
		ret = send_message_queue_full(cb, mqd, mesg, lines);
		break;
	case CBSS_AMO_NACKED:
		STAT(mesq_send_amo_nacked);
		ret = MQE_CONGESTION;
		break;
	case CBSS_PUT_NACKED:
		STAT(mesq_send_put_nacked);
		ret = send_message_put_nacked(cb, mqd, mesg, lines);
		break;
	default:
		BUG();
	}
	return ret;
}
/*
 * Send a message to a message queue.
 *	mqd	message queue descriptor
 *	mesg	message. Must be a vaddr within a GSEG
 *	bytes	message size (<= 2 CL)
 */
int gru_send_message_gpa(struct gru_message_queue_desc *mqd, void *mesg,
				unsigned int bytes)
{
	struct message_header *mhdr;
	void *cb;
	void *dsr;
	int istatus, clines, ret;

	STAT(mesq_send);
	BUG_ON(bytes < sizeof(int) || bytes > 2 * GRU_CACHE_LINE_BYTES);

	clines = DIV_ROUND_UP(bytes, GRU_CACHE_LINE_BYTES);
	if (gru_get_cpu_resources(bytes, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	memcpy(dsr, mesg, bytes);
	mhdr = dsr;
	mhdr->present = MQS_FULL;
	mhdr->lines = clines;
	if (clines == 2) {
		mhdr->present2 = get_present2(mhdr);
		restore_present2(mhdr, MQS_FULL);
	}

	do {
		ret = MQE_OK;
		gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), clines, IMA);
		istatus = gru_wait(cb);
		if (istatus != CBS_IDLE)
			ret = send_message_failure(cb, mqd, dsr, clines);
	} while (ret == MQIE_AGAIN);
	gru_free_cpu_resources(cb, dsr);

	if (ret)
		STAT(mesq_send_failed);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_send_message_gpa);
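
/*
 * Illustrative sketch (not part of the driver): sending one cache line.
 * The first word of the buffer is reserved for struct message_header;
 * the payload byte at offset 8 is arbitrary (this mirrors quicktest1()
 * below; mqd is assumed to be an initialized queue descriptor).
 *
 *	char mes[GRU_CACHE_LINE_BYTES];
 *	int ret;
 *
 *	memset(mes, 0, sizeof(mes));
 *	mes[8] = 'x';
 *	do {
 *		ret = gru_send_message_gpa(&mqd, mes, sizeof(mes));
 *	} while (ret == MQE_CONGESTION);	(retry transient congestion)
 */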
/*
 * Advance the receive pointer for the queue to the next message.
 */
void gru_free_message(struct gru_message_queue_desc *mqd, void *mesg)
{
	struct message_queue *mq = mqd->mq;
	struct message_header *mhdr = mq->next;
	void *next, *pnext;
	int half = -1;
	int lines = mhdr->lines;

	if (lines == 2)
		restore_present2(mhdr, MQS_EMPTY);
	mhdr->present = MQS_EMPTY;

	pnext = mq->next;
	next = pnext + GRU_CACHE_LINE_BYTES * lines;
	if (next == mq->limit) {
		next = mq->start;
		half = 1;
	} else if (pnext < mq->start2 && next >= mq->start2) {
		half = 0;
	}

	if (half >= 0)
		mq->hstatus[half] = 1;
	mq->next = next;
}
EXPORT_SYMBOL_GPL(gru_free_message);
/*
 * Get next message from message queue. Return NULL if no message
 * present. User must call gru_free_message() to move to the next message.
 */
void *gru_get_next_message(struct gru_message_queue_desc *mqd)
{
	struct message_queue *mq = mqd->mq;
	struct message_header *mhdr = mq->next;
	int present = mhdr->present;

	/* skip NOOP messages */
	STAT(mesq_receive);
	while (present == MQS_NOOP) {
		gru_free_message(mqd, mhdr);
		mhdr = mq->next;
		present = mhdr->present;
	}

	/* Wait for both halves of 2 line messages */
	if (present == MQS_FULL && mhdr->lines == 2 &&
				get_present2(mhdr) == MQS_EMPTY)
		present = MQS_EMPTY;

	if (!present) {
		STAT(mesq_receive_none);
		return NULL;
	}

	if (mhdr->lines == 2)
		restore_present2(mhdr, mhdr->present2);

	return mhdr;
}
EXPORT_SYMBOL_GPL(gru_get_next_message);
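
/*
 * Illustrative receive loop (not part of the driver), pairing
 * gru_get_next_message() with gru_free_message():
 *
 *	void *m;
 *
 *	while ((m = gru_get_next_message(&mqd)) != NULL) {
 *		... process the message at m; the first word is the header ...
 *		gru_free_message(&mqd, m);	(advance the receive pointer)
 *	}
 */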
/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/

/*
 * Copy a block of data using the GRU resources.
 */
int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
				unsigned int bytes)
{
	void *cb;
	void *dsr;
	int ret;

	STAT(copy_gpa);
	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
		  XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_CL, IMA);
	ret = gru_wait(cb);
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_copy_gpa);
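
/*
 * Illustrative sketch (not part of the driver): copying a buffer between
 * two global physical addresses, e.g. across partitions. gru_copy_gpa()
 * returns the gru_wait() status, so any nonzero value indicates failure.
 * dst, src, and nbytes are hypothetical.
 *
 *	if (gru_copy_gpa(uv_gpa(dst), uv_gpa(src), nbytes))
 *		... handle the copy failure ...
 */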
/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
/* Temp - will delete after we gain confidence in the GRU */

static int quicktest0(unsigned long arg)
{
	unsigned long word0;
	unsigned long word1;
	void *cb;
	void *dsr;
	unsigned long *p;
	int ret = -EIO;

	if (gru_get_cpu_resources(GRU_CACHE_LINE_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	p = dsr;
	word0 = MAGIC;
	word1 = 0;

	gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE) {
		printk(KERN_DEBUG "GRU quicktest0: CBR failure 1\n");
		goto done;
	}

	if (*p != MAGIC) {
		printk(KERN_DEBUG "GRU: quicktest0 bad magic 0x%lx\n", *p);
		goto done;
	}
	gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE) {
		printk(KERN_DEBUG "GRU quicktest0: CBR failure 2\n");
		goto done;
	}

	if (word0 != word1 || word1 != MAGIC) {
		printk(KERN_DEBUG
		       "GRU quicktest0 err: found 0x%lx, expected 0x%lx\n",
		       word1, MAGIC);
		goto done;
	}
	ret = 0;

done:
	gru_free_cpu_resources(cb, dsr);
	return ret;
}

#define ALIGNUP(p, q)	((void *)(((unsigned long)(p) + (q) - 1) & ~(q - 1)))
static int quicktest1(unsigned long arg)
{
	struct gru_message_queue_desc mqd;
	void *p, *mq;
	unsigned long *dw;
	int i, ret = -EIO;
	char mes[GRU_CACHE_LINE_BYTES], *m;

	/* Need a 1K cacheline-aligned area that does not cross a page boundary */
	p = kmalloc(4096, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	mq = ALIGNUP(p, 1024);
	memset(mes, 0xee, sizeof(mes));
	dw = mq;

	gru_create_message_queue(&mqd, mq, 8 * GRU_CACHE_LINE_BYTES, 0, 0, 0);
	for (i = 0; i < 6; i++) {
		mes[8] = i;
		do {
			ret = gru_send_message_gpa(&mqd, mes, sizeof(mes));
		} while (ret == MQE_CONGESTION);
		if (ret)
			break;
	}
	if (ret != MQE_QUEUE_FULL || i != 4)
		goto done;

	for (i = 0; i < 6; i++) {
		m = gru_get_next_message(&mqd);
		if (!m || m[8] != i)
			break;
		gru_free_message(&mqd, m);
	}
	ret = (i == 4) ? 0 : -EIO;

done:
	kfree(p);
	return ret;
}
static int quicktest2(unsigned long arg)
{
	static DECLARE_COMPLETION(cmp);
	unsigned long han;
	int blade_id = 0;
	int numcb = 4;
	int ret = 0;
	unsigned long *buf;
	void *cb0, *cb;
	int i, k, istatus, bytes;

	bytes = numcb * 4 * 8;
	buf = kmalloc(bytes, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = -EBUSY;
	han = gru_reserve_async_resources(blade_id, numcb, 0, &cmp);
	if (!han)
		goto done;

	gru_lock_async_resource(han, &cb0, NULL);
	memset(buf, 0xee, bytes);
	for (i = 0; i < numcb; i++)
		gru_vset(cb0 + i * GRU_HANDLE_STRIDE, uv_gpa(&buf[i * 4]), 0,
				XTYPE_DW, 4, 1, IMA_INTERRUPT);

	ret = 0;
	for (k = 0; k < numcb; k++) {
		gru_wait_async_cbr(han);
		for (i = 0; i < numcb; i++) {
			cb = cb0 + i * GRU_HANDLE_STRIDE;
			istatus = gru_check_status(cb);
			if (istatus == CBS_ACTIVE)
				continue;
			if (istatus == CBS_EXCEPTION)
				ret = -EFAULT;
			else if (buf[i] || buf[i + 1] || buf[i + 2] ||
					buf[i + 3])
				ret = -EIO;
		}
	}
	BUG_ON(cmp.done);

	gru_unlock_async_resource(han);
	gru_release_async_resources(han);
done:
	kfree(buf);
	return ret;
}
/*
 * Debugging only. User hook for various kernel tests
 * of driver & gru.
 */
int gru_ktest(unsigned long arg)
{
	int ret = -EINVAL;

	switch (arg & 0xff) {
	case 0:
		ret = quicktest0(arg);
		break;
	case 1:
		ret = quicktest1(arg);
		break;
	case 2:
		ret = quicktest2(arg);
		break;
	case 99:
		ret = gru_free_kernel_contexts();
		break;
	}
	return ret;
}

int gru_kservices_init(void)
{
	return 0;
}

void gru_kservices_exit(void)
{
	if (gru_free_kernel_contexts())
		BUG();
}