/*
 * SN Platform GRU Driver
 *
 *              KERNEL SERVICES THAT USE THE GRU
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include "grutables.h"
#include "grukservices.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>
/*
 * Kernel GRU Usage
 *
 * The following is an interim algorithm for management of kernel GRU
 * resources. This will likely be replaced when we better understand the
 * kernel/user requirements.
 *
 * Blade percpu resources reserved for kernel use. These resources are
 * reserved whenever the kernel context for the blade is loaded. Note
 * that the kernel context is not guaranteed to be always available. It is
 * loaded on demand & can be stolen by a user if the user demand exceeds the
 * kernel demand. The kernel can always reload the kernel context but
 * a SLEEP may be required!!!.
 *
 * Async Overview:
 *
 *	Each blade has one "kernel context" that owns GRU kernel resources
 *	located on the blade. Kernel drivers use GRU resources in this context
 *	for sending messages, zeroing memory, etc.
 *
 *	The kernel context is dynamically loaded on demand. If it is not in
 *	use by the kernel, the kernel context can be unloaded & given to a user.
 *	The kernel context will be reloaded when needed. This may require that
 *	a context be stolen from a user.
 *		NOTE: frequent unloading/reloading of the kernel context is
 *		expensive. We are depending on batch schedulers, cpusets, sane
 *		drivers or some other mechanism to prevent the need for frequent
 *		stealing/reloading.
 *
 *	The kernel context consists of two parts:
 *		- 1 CB & a few DSRs that are reserved for each cpu on the blade.
 *		  Each cpu has its own private resources & does not share them
 *		  with other cpus. These resources are used serially, ie,
 *		  locked, used & unlocked on each call to a function in
 *		  grukservices.
 *			(Now that we have dynamic loading of kernel contexts, I
 *			 may rethink this & allow sharing between cpus....)
 *
 *		- Additional resources can be reserved long term & used directly
 *		  by UV drivers located in the kernel. Drivers using these GRU
 *		  resources can use asynchronous GRU instructions that send
 *		  interrupts on completion.
 *			- these resources must be explicitly locked/unlocked
 *			- locked resources prevent (obviously) the kernel
 *			  context from being unloaded.
 *			- drivers using these resources directly issue their own
 *			  GRU instruction and must wait/check completion.
 *
 *		  When these resources are reserved, the caller can optionally
 *		  associate a wait_queue with the resources and use asynchronous
 *		  GRU instructions. When an async GRU instruction completes, the
 *		  driver will do a wakeup on the event.
 */
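
/*
 * Illustrative sketch (not part of this driver) of how a UV kernel driver
 * might use the long-term async reservation API below. The blade id,
 * "my_cmp" completion and "my_buf" buffer are assumptions for the example;
 * the calls mirror quicktest2() at the end of this file.
 *
 *	static DECLARE_COMPLETION(my_cmp);
 *	unsigned long han;
 *	void *cb;
 *
 *	han = gru_reserve_async_resources(uv_numa_blade_id(), 2, 0, &my_cmp);
 *	gru_lock_async_resource(han, &cb, NULL);
 *	gru_vset(cb, uv_gpa(my_buf), 0, XTYPE_DW, 4, 1, IMA_INTERRUPT);
 *	gru_wait_async_cbr(han);	// woken via the completion on interrupt
 *	gru_unlock_async_resource(han);
 *	gru_release_async_resources(han);
 */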
#define ASYNC_HAN_TO_BID(h)	((h) - 1)
#define ASYNC_BID_TO_HAN(b)	((b) + 1)
#define ASYNC_HAN_TO_BS(h)	gru_base[ASYNC_HAN_TO_BID(h)]

#define GRU_NUM_KERNEL_CBR	1
#define GRU_NUM_KERNEL_DSR_BYTES 256
#define GRU_NUM_KERNEL_DSR_CL	(GRU_NUM_KERNEL_DSR_BYTES /		\
					GRU_CACHE_LINE_BYTES)

/* GRU instruction attributes for all instructions */
#define IMA			IMA_CB_DELAY

/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */
#define __gru_cacheline_aligned__				\
	__attribute__((__aligned__(GRU_CACHE_LINE_BYTES)))

#define MAGIC	0x1234567887654321UL

/* Default retry count for GRU errors on kernel instructions */
#define EXCEPTION_RETRY_LIMIT	3
/* Status of message queue sections */
#define MQS_EMPTY		0
#define MQS_FULL		1
#define MQS_NOOP		2
/*----------------- RESOURCE MANAGEMENT -------------------------------------*/
/* optimized for x86_64 */
struct message_queue {
	union gru_mesqhead	head __gru_cacheline_aligned__;	/* CL 0 */
	int			qlines;				/* DW 1 */
	int			hstatus[2];
	void			*next __gru_cacheline_aligned__;/* CL 1 */
	void			*limit;
	void			*start;
	void			*start2;
	char			data ____cacheline_aligned;	/* CL 2 */
};

/* First word in every message - used by mesq interface */
struct message_header {
	char	present;
	char	present2;
	char	lines;
	char	fill;
};

#define HSTATUS(mq, h)	((mq) + offsetof(struct message_queue, hstatus[h]))
/*
 * Reload the blade's kernel context into a GRU chiplet. Called holding
 * the bs_kgts_sema for READ. Will steal user contexts if necessary.
 */
static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
{
	struct gru_state *gru;
	struct gru_thread_state *kgts;
	void *vaddr;
	int ctxnum, ncpus;

	up_read(&bs->bs_kgts_sema);
	down_write(&bs->bs_kgts_sema);

	if (!bs->bs_kgts)
		bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0);
	kgts = bs->bs_kgts;

	if (!kgts->ts_gru) {
		STAT(load_kernel_context);
		ncpus = uv_blade_nr_possible_cpus(blade_id);
		kgts->ts_cbr_au_count = GRU_CB_COUNT_TO_AU(
			GRU_NUM_KERNEL_CBR * ncpus + bs->bs_async_cbrs);
		kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
			GRU_NUM_KERNEL_DSR_BYTES * ncpus +
				bs->bs_async_dsr_bytes);
		while (!gru_assign_gru_context(kgts, blade_id)) {
			msleep(1);
			gru_steal_context(kgts, blade_id);
		}
		gru_load_context(kgts);
		gru = bs->bs_kgts->ts_gru;
		vaddr = gru->gs_gru_base_vaddr;
		ctxnum = kgts->ts_ctxnum;
		bs->kernel_cb = get_gseg_base_address_cb(vaddr, ctxnum, 0);
		bs->kernel_dsr = get_gseg_base_address_ds(vaddr, ctxnum, 0);
	}
	downgrade_write(&bs->bs_kgts_sema);
}
/*
 * Lock & load the kernel context for the specified blade.
 */
static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
{
	struct gru_blade_state *bs;

	STAT(lock_kernel_context);
	bs = gru_base[blade_id];

	down_read(&bs->bs_kgts_sema);
	if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
		gru_load_kernel_context(bs, blade_id);
	return bs;
}
/*
 * Unlock the kernel context for the specified blade. Context is not
 * unloaded but may be stolen before next use.
 */
static void gru_unlock_kernel_context(int blade_id)
{
	struct gru_blade_state *bs;

	bs = gru_base[blade_id];
	up_read(&bs->bs_kgts_sema);
	STAT(unlock_kernel_context);
}
/*
 * Reserve & get pointers to the DSR/CBRs reserved for the current cpu.
 *	- returns with preemption disabled
 */
static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
{
	struct gru_blade_state *bs;
	int lcpu;

	BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
	preempt_disable();
	bs = gru_lock_kernel_context(uv_numa_blade_id());
	lcpu = uv_blade_processor_id();
	*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
	*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
	return 0;
}
/*
 * Free the current cpus reserved DSR/CBR resources.
 */
static void gru_free_cpu_resources(void *cb, void *dsr)
{
	gru_unlock_kernel_context(uv_numa_blade_id());
	preempt_enable();
}
/*
 * Reserve GRU resources to be used asynchronously.
 *   Note: currently supports only 1 reservation per blade.
 *
 *	input:
 *		blade_id  - blade on which resources should be reserved
 *		cbrs	  - number of CBRs
 *		dsr_bytes - number of DSR bytes needed
 *	output:
 *		handle to identify resource
 *		(0 = async resources already reserved)
 */
unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes,
			struct completion *cmp)
{
	struct gru_blade_state *bs;
	struct gru_thread_state *kgts;
	int ret = 0;

	bs = gru_base[blade_id];

	down_write(&bs->bs_kgts_sema);

	/* Verify no resources already reserved */
	if (bs->bs_async_dsr_bytes + bs->bs_async_cbrs)
		goto done;
	bs->bs_async_dsr_bytes = dsr_bytes;
	bs->bs_async_cbrs = cbrs;
	bs->bs_async_wq = cmp;
	kgts = bs->bs_kgts;

	/* Resources changed. Unload context if already loaded */
	if (kgts && kgts->ts_gru)
		gru_unload_context(kgts, 0);
	ret = ASYNC_BID_TO_HAN(blade_id);

done:
	up_write(&bs->bs_kgts_sema);
	return ret;
}
/*
 * Release async resources previously reserved.
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_release_async_resources(unsigned long han)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);

	down_write(&bs->bs_kgts_sema);
	bs->bs_async_dsr_bytes = 0;
	bs->bs_async_cbrs = 0;
	bs->bs_async_wq = NULL;
	up_write(&bs->bs_kgts_sema);
}
/*
 * Wait for async GRU instructions to complete.
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_wait_async_cbr(unsigned long han)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);

	wait_for_completion(bs->bs_async_wq);
	mb();
}
/*
 * Lock previous reserved async GRU resources
 *
 *	input:
 *		han - handle to identify resources
 *	output:
 *		cb  - pointer to first CBR
 *		dsr - pointer to first DSR
 */
void gru_lock_async_resource(unsigned long han, void **cb, void **dsr)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
	int blade_id = ASYNC_HAN_TO_BID(han);
	int ncpus;

	gru_lock_kernel_context(blade_id);
	ncpus = uv_blade_nr_possible_cpus(blade_id);
	if (cb)
		*cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE;
	if (dsr)
		*dsr = bs->kernel_dsr + ncpus * GRU_NUM_KERNEL_DSR_BYTES;
}
/*
 * Unlock previous reserved async GRU resources
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_unlock_async_resource(unsigned long han)
{
	int blade_id = ASYNC_HAN_TO_BID(han);

	gru_unlock_kernel_context(blade_id);
}
/*----------------------------------------------------------------------*/
int gru_get_cb_exception_detail(void *cb,
		struct control_block_extended_exc_detail *excdet)
{
	struct gru_control_block_extended *cbe;

	cbe = get_cbe(GRUBASE(cb), get_cb_number(cb));
	prefetchw(cbe);		/* Harmless on hardware, required for emulator */
	excdet->opc = cbe->opccpy;
	excdet->exopc = cbe->exopccpy;
	excdet->ecause = cbe->ecause;
	excdet->exceptdet0 = cbe->idef1upd;
	excdet->exceptdet1 = cbe->idef3upd;
	return 0;
}
char *gru_get_cb_exception_detail_str(int ret, void *cb,
				      char *buf, int size)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;

	if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
		gru_get_cb_exception_detail(cb, &excdet);
		snprintf(buf, size,
			"GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
			"excdet0 0x%lx, excdet1 0x%x",
			gen, excdet.opc, excdet.exopc, excdet.ecause,
			excdet.exceptdet0, excdet.exceptdet1);
	} else {
		snprintf(buf, size, "No exception");
	}
	return buf;
}
static int gru_wait_idle_or_exception(struct gru_control_block_status *gen)
{
	while (gen->istatus >= CBS_ACTIVE) {
		cpu_relax();
		barrier();
	}
	return gen->istatus;
}
static int gru_retry_exception(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;
	int retry = EXCEPTION_RETRY_LIMIT;

	while (1)  {
		if (gru_get_cb_message_queue_substatus(cb))
			break;
		if (gru_wait_idle_or_exception(gen) == CBS_IDLE)
			return CBS_IDLE;

		gru_get_cb_exception_detail(cb, &excdet);
		if (excdet.ecause & ~EXCEPTION_RETRY_BITS)
			break;
		if (retry-- == 0)
			break;
		gen->icmd = 1;
		gru_flush_cache(gen);
	}
	return CBS_EXCEPTION;
}
int gru_check_status_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gen->istatus;
	if (ret != CBS_EXCEPTION)
		return ret;
	return gru_retry_exception(cb);
}
int gru_wait_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gru_wait_idle_or_exception(gen);
	if (ret == CBS_EXCEPTION)
		ret = gru_retry_exception(cb);

	return ret;
}
void gru_abort(int ret, void *cb, char *str)
{
	char buf[GRU_EXC_STR_SIZE];

	panic("GRU FATAL ERROR: %s - %s\n", str,
	      gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf)));
}
void gru_wait_abort_proc(void *cb)
{
	int ret;

	ret = gru_wait_proc(cb);
	if (ret)
		gru_abort(ret, cb, "gru_wait_abort");
}
/*------------------------------ MESSAGE QUEUES -----------------------------*/

/* Internal status. These are NOT returned to the user. */
#define MQIE_AGAIN		-1	/* try again */


/*
 * Save/restore the "present" flag that is in the second line of 2-line
 * messages
 */
static inline int get_present2(void *p)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	return mhdr->present;
}
static inline void restore_present2(void *p, int val)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	mhdr->present = val;
}
/*
 * Create a message queue.
 *	qlines - message queue size in cache lines. Includes 2-line header.
 */
int gru_create_message_queue(struct gru_message_queue_desc *mqd,
		void *p, unsigned int bytes, int nasid, int vector, int apicid)
{
	struct message_queue *mq = p;
	unsigned int qlines;

	qlines = bytes / GRU_CACHE_LINE_BYTES - 2;
	memset(mq, 0, bytes);
	mq->start = &mq->data;
	mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
	mq->next = &mq->data;
	mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
	mq->qlines = qlines;
	mq->hstatus[0] = 0;
	mq->hstatus[1] = 1;
	mq->head = gru_mesq_head(2, qlines / 2 + 1);
	mqd->mq = mq;
	mqd->mq_gpa = uv_gpa(mq);
	mqd->qlines = qlines;
	mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid);
	mqd->interrupt_vector = vector;
	mqd->interrupt_apicid = apicid;
	return 0;
}
EXPORT_SYMBOL_GPL(gru_create_message_queue);
/*
 * Send a NOOP message to a message queue
 *	Returns:
 *		 0 - if queue is full after the send. This is the normal case
 *		     but various races can change this.
 *		-1 - if mesq sent successfully but queue not full
 *		>0 - unexpected error. MQE_xxx returned
 */
static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg)
{
	const struct message_header noop_header = {
					.present = MQS_NOOP, .lines = 1};
	unsigned long m;
	int substatus, ret;
	struct message_header save_mhdr, *mhdr = mesg;

	STAT(mesq_noop);
	save_mhdr = *mhdr;	/* save the message being overwritten */
	*mhdr = noop_header;
	gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), 1, IMA);
	ret = gru_wait(cb);

	if (ret) {
		substatus = gru_get_cb_message_queue_substatus(cb);
		switch (substatus) {
		case CBSS_NO_ERROR:
			STAT(mesq_noop_unexpected_error);
			ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_LB_OVERFLOWED:
			STAT(mesq_noop_lb_overflow);
			ret = MQE_CONGESTION;
			break;
		case CBSS_QLIMIT_REACHED:
			STAT(mesq_noop_qlimit_reached);
			ret = 0;
			break;
		case CBSS_AMO_NACKED:
			STAT(mesq_noop_amo_nacked);
			ret = MQE_CONGESTION;
			break;
		case CBSS_PUT_NACKED:
			STAT(mesq_noop_put_nacked);
			m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
			gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
						IMA);
			if (gru_wait(cb) == CBS_IDLE)
				ret = MQIE_AGAIN;
			else
				ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_PAGE_OVERFLOW:
		default:
			BUG();
		}
	}
	*mhdr = save_mhdr;	/* restore the original message */
	return ret;
}
/*
 * Handle a gru_mesq full.
 */
static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	union gru_mesqhead mqh;
	unsigned int limit, head;
	unsigned long avalue;
	int half, qlines;

	/* Determine if switching to first/second half of q */
	avalue = gru_get_amo_value(cb);
	head = gru_get_amo_value_head(cb);
	limit = gru_get_amo_value_limit(cb);

	qlines = mqd->qlines;
	half = (limit != qlines);

	if (half)
		mqh = gru_mesq_head(qlines / 2 + 1, qlines);
	else
		mqh = gru_mesq_head(2, qlines / 2 + 1);

	/* Try to get lock for switching head pointer */
	gru_gamir(cb, EOP_IR_CLR, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;
	if (!gru_get_amo_value(cb)) {
		STAT(mesq_qf_locked);
		return MQE_QUEUE_FULL;
	}

	/* Got the lock. Send optional NOP if queue not full, */
	if (head != limit) {
		if (send_noop_message(cb, mqd, mesg)) {
			gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half),
					XTYPE_DW, IMA);
			if (gru_wait(cb) != CBS_IDLE)
				goto cberr;
			STAT(mesq_qf_noop_not_full);
			return MQIE_AGAIN;
		}
		avalue++;
	}

	/* Then flip queuehead to other half of queue. */
	gru_gamer(cb, EOP_ERR_CSWAP, mqd->mq_gpa, XTYPE_DW, mqh.val, avalue,
							IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;

	/* If not successfully in swapping queue head, clear the hstatus lock */
	if (gru_get_amo_value(cb) != avalue) {
		STAT(mesq_qf_switch_head_failed);
		gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), XTYPE_DW,
				IMA);
		if (gru_wait(cb) != CBS_IDLE)
			goto cberr;
	}
	return MQIE_AGAIN;
cberr:
	STAT(mesq_qf_unexpected_error);
	return MQE_UNEXPECTED_CB_ERR;
}
/*
 * Send a cross-partition interrupt to the SSI that contains the target
 * message queue. Normally, the interrupt is automatically delivered by hardware
 * but some error conditions require explicit delivery.
 */
static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
{
	if (mqd->interrupt_vector)
		uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid,
				mqd->interrupt_vector);
}
/*
 * Handle a PUT failure. Note: if message was a 2-line message, one of the
 * lines might have been successfully written. Before sending the
 * message, "present" must be cleared in BOTH lines to prevent the receiver
 * from prematurely seeing the full message.
 */
static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
			void *mesg, int lines)
{
	unsigned long m;

	m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
	if (lines == 2) {
		gru_vset(cb, m, 0, XTYPE_CL, lines, 1, IMA);
		if (gru_wait(cb) != CBS_IDLE)
			return MQE_UNEXPECTED_CB_ERR;
	}
	gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		return MQE_UNEXPECTED_CB_ERR;
	send_message_queue_interrupt(mqd);
	return MQE_OK;
}
/*
 * Handle a gru_mesq failure. Some of these failures are software recoverable
 * or retryable.
 */
static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	int substatus, ret = 0;

	substatus = gru_get_cb_message_queue_substatus(cb);
	switch (substatus) {
	case CBSS_NO_ERROR:
		STAT(mesq_send_unexpected_error);
		ret = MQE_UNEXPECTED_CB_ERR;
		break;
	case CBSS_LB_OVERFLOWED:
		STAT(mesq_send_lb_overflow);
		ret = MQE_CONGESTION;
		break;
	case CBSS_QLIMIT_REACHED:
		STAT(mesq_send_qlimit_reached);
		ret = send_message_queue_full(cb, mqd, mesg, lines);
		break;
	case CBSS_AMO_NACKED:
		STAT(mesq_send_amo_nacked);
		ret = MQE_CONGESTION;
		break;
	case CBSS_PUT_NACKED:
		STAT(mesq_send_put_nacked);
		ret = send_message_put_nacked(cb, mqd, mesg, lines);
		break;
	default:
		BUG();
	}
	return ret;
}
/*
 * Send a message to a message queue
 *	mqd	message queue descriptor
 *	mesg	message. Must be vaddr within a GSEG
 *	bytes	message size (<= 2 CL)
 */
int gru_send_message_gpa(struct gru_message_queue_desc *mqd, void *mesg,
				unsigned int bytes)
{
	struct message_header *mhdr;
	void *cb;
	void *dsr;
	int istatus, clines, ret;

	STAT(mesq_send);
	BUG_ON(bytes < sizeof(int) || bytes > 2 * GRU_CACHE_LINE_BYTES);

	clines = DIV_ROUND_UP(bytes, GRU_CACHE_LINE_BYTES);
	if (gru_get_cpu_resources(bytes, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	memcpy(dsr, mesg, bytes);
	mhdr = dsr;
	mhdr->present = MQS_FULL;
	mhdr->lines = clines;
	if (clines == 2) {
		mhdr->present2 = get_present2(mhdr);
		restore_present2(mhdr, MQS_FULL);
	}

	do {
		ret = MQE_OK;
		gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), clines, IMA);
		istatus = gru_wait(cb);
		if (istatus != CBS_IDLE)
			ret = send_message_failure(cb, mqd, dsr, clines);
	} while (ret == MQIE_AGAIN);
	gru_free_cpu_resources(cb, dsr);

	if (ret)
		STAT(mesq_send_failed);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_send_message_gpa);
/*
 * Advance the receive pointer for the queue to the next message.
 */
void gru_free_message(struct gru_message_queue_desc *mqd, void *mesg)
{
	struct message_queue *mq = mqd->mq;
	struct message_header *mhdr = mq->next;
	void *next, *pnext;
	int half = -1;
	int lines = mhdr->lines;

	if (lines == 2)
		restore_present2(mhdr, MQS_EMPTY);
	mhdr->present = MQS_EMPTY;

	pnext = mq->next;
	next = pnext + GRU_CACHE_LINE_BYTES * lines;
	if (next == mq->limit) {
		next = mq->start;
		half = 1;
	} else if (pnext < mq->start2 && next >= mq->start2) {
		half = 0;
	}

	if (half >= 0)
		mq->hstatus[half] = 1;
	mq->next = next;
}
EXPORT_SYMBOL_GPL(gru_free_message);
/*
 * Get next message from message queue. Return NULL if no message
 * present. User must call gru_free_message() to move to the next message.
 */
void *gru_get_next_message(struct gru_message_queue_desc *mqd)
{
	struct message_queue *mq = mqd->mq;
	struct message_header *mhdr = mq->next;
	int present = mhdr->present;

	/* skip NOOP messages */
	while (present == MQS_NOOP) {
		gru_free_message(mqd, mhdr);
		mhdr = mq->next;
		present = mhdr->present;
	}

	/* Wait for both halves of 2 line messages */
	if (present == MQS_FULL && mhdr->lines == 2 &&
				get_present2(mhdr) == MQS_EMPTY)
		present = MQS_EMPTY;

	if (!present) {
		STAT(mesq_receive_none);
		return NULL;
	}

	if (mhdr->lines == 2)
		restore_present2(mhdr, mhdr->present2);

	return mhdr;
}
EXPORT_SYMBOL_GPL(gru_get_next_message);
/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/

/*
 * Copy a block of data using the GRU resources
 */
int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
				unsigned int bytes)
{
	void *cb;
	void *dsr;
	int ret;

	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
		  XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_CL, IMA);
	ret = gru_wait(cb);
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_copy_gpa);
/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
/* Temp - will delete after we gain confidence in the GRU */

static int quicktest0(unsigned long arg)
{
	unsigned long word0;
	unsigned long word1;
	void *cb;
	void *dsr;
	unsigned long *p;
	int ret = -EIO;

	if (gru_get_cpu_resources(GRU_CACHE_LINE_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	p = dsr;
	word0 = MAGIC;
	word1 = 0;

	gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE) {
		printk(KERN_DEBUG "GRU quicktest0: CBR failure 1\n");
		goto done;
	}

	if (*p != MAGIC) {
		printk(KERN_DEBUG "GRU: quicktest0 bad magic 0x%lx\n", *p);
		goto done;
	}
	gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE) {
		printk(KERN_DEBUG "GRU quicktest0: CBR failure 2\n");
		goto done;
	}

	if (word0 != word1 || word1 != MAGIC) {
		printk(KERN_DEBUG
		       "GRU quicktest0 err: found 0x%lx, expected 0x%lx\n",
		       word1, MAGIC);
		goto done;
	}
	ret = 0;

done:
	gru_free_cpu_resources(cb, dsr);
	return ret;
}

#define ALIGNUP(p, q)	((void *)(((unsigned long)(p) + (q) - 1) & ~(q - 1)))
static int quicktest1(unsigned long arg)
{
	struct gru_message_queue_desc mqd;
	void *p, *mq;
	int i, ret = -EIO;
	char mes[GRU_CACHE_LINE_BYTES], *m;

	/* Need 1K cacheline aligned that does not cross page boundary */
	p = kmalloc(4096, 0);
	mq = ALIGNUP(p, 1024);
	memset(mes, 0xee, sizeof(mes));

	gru_create_message_queue(&mqd, mq, 8 * GRU_CACHE_LINE_BYTES, 0, 0, 0);
	for (i = 0; i < 6; i++) {
		mes[8] = i;
		do {
			ret = gru_send_message_gpa(&mqd, mes, sizeof(mes));
		} while (ret == MQE_CONGESTION);
		if (ret)
			break;
	}
	if (ret != MQE_QUEUE_FULL || i != 4)
		goto done;

	for (i = 0; i < 6; i++) {
		m = gru_get_next_message(&mqd);
		if (!m || m[8] != i)
			break;
		gru_free_message(&mqd, m);
	}
	ret = (i == 4) ? 0 : -EIO;

done:
	kfree(p);
	return ret;
}
static int quicktest2(unsigned long arg)
{
	static DECLARE_COMPLETION(cmp);
	unsigned long han;
	int blade_id = 0;
	int numcb = 4;
	int ret = 0;
	unsigned long *buf;
	void *cb0, *cb;
	int i, k, istatus, bytes;

	bytes = numcb * 4 * 8;
	buf = kmalloc(bytes, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = -EBUSY;
	han = gru_reserve_async_resources(blade_id, numcb, 0, &cmp);
	if (!han)
		goto done;

	gru_lock_async_resource(han, &cb0, NULL);
	memset(buf, 0xee, bytes);
	for (i = 0; i < numcb; i++)
		gru_vset(cb0 + i * GRU_HANDLE_STRIDE, uv_gpa(&buf[i * 4]), 0,
				XTYPE_DW, 4, 1, IMA_INTERRUPT);

	ret = 0;
	for (k = 0; k < numcb; k++) {
		gru_wait_async_cbr(han);
		for (i = 0; i < numcb; i++) {
			cb = cb0 + i * GRU_HANDLE_STRIDE;
			istatus = gru_check_status(cb);
			if (istatus == CBS_ACTIVE)
				continue;
			if (istatus == CBS_EXCEPTION)
				ret = -EFAULT;
			else if (buf[i] || buf[i + 1] || buf[i + 2] ||
					buf[i + 3])
				ret = -EIO;
		}
	}

	gru_unlock_async_resource(han);
	gru_release_async_resources(han);
done:
	kfree(buf);
	return ret;
}
/*
 * Debugging only. User hook for various kernel tests
 * of driver & gru.
 */
int gru_ktest(unsigned long arg)
{
	int ret = -EINVAL;

	switch (arg & 0xff) {
	case 0:
		ret = quicktest0(arg);
		break;
	case 1:
		ret = quicktest1(arg);
		break;
	case 2:
		ret = quicktest2(arg);
		break;
	}
	return ret;
}
int gru_kservices_init(struct gru_state *gru)
{
	struct gru_blade_state *bs;

	bs = gru->gs_blade;
	if (gru != &bs->bs_grus[0])
		return 0;

	init_rwsem(&bs->bs_kgts_sema);
	return 0;
}
void gru_kservices_exit(struct gru_state *gru)
{
	struct gru_blade_state *bs;
	struct gru_thread_state *kgts;

	bs = gru->gs_blade;
	if (gru != &bs->bs_grus[0])
		return;

	kgts = bs->bs_kgts;
	if (kgts && kgts->ts_gru)
		gru_unload_context(kgts, 0);
	kfree(kgts);
	bs->bs_kgts = NULL;
}