gru: fix prefetch and speculation bugs
drivers/misc/sgi-gru/grukservices.c
/*
 * SN Platform GRU Driver
 *
 *              KERNEL SERVICES THAT USE THE GRU
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
#include "grukservices.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>
/*
 * Kernel GRU Usage
 *
 * The following is an interim algorithm for management of kernel GRU
 * resources. This will likely be replaced when we better understand the
 * kernel/user requirements.
 *
 * Blade percpu resources reserved for kernel use. These resources are
 * reserved whenever the kernel context for the blade is loaded. Note
 * that the kernel context is not guaranteed to be always available. It is
 * loaded on demand & can be stolen by a user if the user demand exceeds the
 * kernel demand. The kernel can always reload the kernel context but
 * a SLEEP may be required!!!
 *
 * Async Overview:
 *
 *	Each blade has one "kernel context" that owns GRU kernel resources
 *	located on the blade. Kernel drivers use GRU resources in this context
 *	for sending messages, zeroing memory, etc.
 *
 *	The kernel context is dynamically loaded on demand. If it is not in
 *	use by the kernel, the kernel context can be unloaded & given to a user.
 *	The kernel context will be reloaded when needed. This may require that
 *	a context be stolen from a user.
 *		NOTE: frequent unloading/reloading of the kernel context is
 *		expensive. We are depending on batch schedulers, cpusets, sane
 *		drivers or some other mechanism to prevent the need for frequent
 *		stealing/reloading.
 *
 *	The kernel context consists of two parts:
 *		- 1 CB & a few DSRs that are reserved for each cpu on the blade.
 *		  Each cpu has its own private resources & does not share them
 *		  with other cpus. These resources are used serially, ie,
 *		  locked, used & unlocked on each call to a function in
 *		  grukservices.
 *			(Now that we have dynamic loading of kernel contexts, I
 *			 may rethink this & allow sharing between cpus....)
 *
 *		- Additional resources can be reserved long term & used directly
 *		  by UV drivers located in the kernel. Drivers using these GRU
 *		  resources can use asynchronous GRU instructions that send
 *		  interrupts on completion.
 *			- these resources must be explicitly locked/unlocked
 *			- locked resources prevent (obviously) the kernel
 *			  context from being unloaded.
 *			- drivers using these resources directly issue their own
 *			  GRU instruction and must wait/check completion.
 *
 *		  When these resources are reserved, the caller can optionally
 *		  associate a wait_queue with the resources and use asynchronous
 *		  GRU instructions. When an async GRU instruction completes, the
 *		  driver will do a wakeup on the event.
 */
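
/*
 * Sketch of the per-cpu lock/use/unlock pattern described above. This is
 * illustration only, not part of the driver; gru_copy_gpa() below is a
 * real instance of exactly this sequence:
 *
 *	void *cb, *dsr;
 *	int ret;
 *
 *	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
 *		return MQE_BUG_NO_RESOURCES;	<- locks kernel context,
 *						   disables preemption
 *	gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr), XTYPE_B,
 *		  bytes, GRU_NUM_KERNEL_DSR_CL, IMA);	<- use private CB/DSR
 *	ret = gru_wait(cb);			<- spin for completion
 *	gru_free_cpu_resources(cb, dsr);	<- unlock, reenable preemption
 */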
#define ASYNC_HAN_TO_BID(h)	((h) - 1)
#define ASYNC_BID_TO_HAN(b)	((b) + 1)
#define ASYNC_HAN_TO_BS(h)	gru_base[ASYNC_HAN_TO_BID(h)]
#define KCB_TO_GID(cb)		((cb - gru_start_vaddr) /		\
					(GRU_SIZE * GRU_CHIPLETS_PER_BLADE))
#define KCB_TO_BS(cb)		gru_base[KCB_TO_GID(cb)]

#define GRU_NUM_KERNEL_CBR	1
#define GRU_NUM_KERNEL_DSR_BYTES 256
#define GRU_NUM_KERNEL_DSR_CL	(GRU_NUM_KERNEL_DSR_BYTES /		\
					GRU_CACHE_LINE_BYTES)

/* GRU instruction attributes for all instructions */
#define IMA			IMA_CB_DELAY

/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */
#define __gru_cacheline_aligned__				\
	__attribute__((__aligned__(GRU_CACHE_LINE_BYTES)))

#define MAGIC	0x1234567887654321UL

/* Default retry count for GRU errors on kernel instructions */
#define EXCEPTION_RETRY_LIMIT	3

/* Status of message queue sections */
#define MQS_EMPTY		0
#define MQS_FULL		1
#define MQS_NOOP		2
/*----------------- RESOURCE MANAGEMENT -------------------------------------*/
/* optimized for x86_64 */
struct message_queue {
	union gru_mesqhead	head __gru_cacheline_aligned__;	/* CL 0 */
	int			qlines;				/* DW 1 */
	long			hstatus[2];
	void			*next __gru_cacheline_aligned__;/* CL 1 */
	void			*limit;
	void			*start;
	void			*start2;
	char			data ____cacheline_aligned;	/* CL 2 */
};

/* First word in every message - used by mesq interface */
struct message_header {
	char	present;
	char	present2;
	char	lines;
	char	fill;
};

#define HSTATUS(mq, h)	((mq) + offsetof(struct message_queue, hstatus[h]))
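
/*
 * Worked sizing example (values as computed by gru_create_message_queue()
 * below for an 8-cacheline queue, i.e. bytes = 8 * GRU_CACHE_LINE_BYTES):
 *
 *	qlines  = 8 - 2 = 6			2-line header excluded
 *	start   = &data
 *	start2  = &data + 2 * GRU_CACHE_LINE_BYTES
 *	limit   = &data + 4 * GRU_CACHE_LINE_BYTES
 *	hstatus = {0, 1}
 *	head    = gru_mesq_head(2, 4)
 *
 * The body is managed as two halves, each guarded by an hstatus word;
 * quicktest1() below shows such a queue accepting 4 one-line messages
 * before gru_send_message_gpa() returns MQE_QUEUE_FULL.
 */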
/*
 * Reload the blade's kernel context into a GRU chiplet. Called holding
 * the bs_kgts_sema for READ. Will steal user contexts if necessary.
 */
static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
{
	struct gru_state *gru;
	struct gru_thread_state *kgts;
	void *vaddr;
	int ctxnum, ncpus;

	up_read(&bs->bs_kgts_sema);
	down_write(&bs->bs_kgts_sema);

	if (!bs->bs_kgts) {
		bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0);
		bs->bs_kgts->ts_user_blade_id = blade_id;
	}
	kgts = bs->bs_kgts;

	if (!kgts->ts_gru) {
		STAT(load_kernel_context);
		ncpus = uv_blade_nr_possible_cpus(blade_id);
		kgts->ts_cbr_au_count = GRU_CB_COUNT_TO_AU(
			GRU_NUM_KERNEL_CBR * ncpus + bs->bs_async_cbrs);
		kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
			GRU_NUM_KERNEL_DSR_BYTES * ncpus +
				bs->bs_async_dsr_bytes);
		while (!gru_assign_gru_context(kgts)) {
			msleep(1);
			gru_steal_context(kgts);
		}
		gru_load_context(kgts);
		gru = bs->bs_kgts->ts_gru;
		vaddr = gru->gs_gru_base_vaddr;
		ctxnum = kgts->ts_ctxnum;
		bs->kernel_cb = get_gseg_base_address_cb(vaddr, ctxnum, 0);
		bs->kernel_dsr = get_gseg_base_address_ds(vaddr, ctxnum, 0);
	}
	downgrade_write(&bs->bs_kgts_sema);
}
/*
 * Free all kernel contexts that are not currently in use.
 *   Returns 0 if all freed, else the number of in-use contexts.
 */
static int gru_free_kernel_contexts(void)
{
	struct gru_blade_state *bs;
	struct gru_thread_state *kgts;
	int bid, ret = 0;

	for (bid = 0; bid < GRU_MAX_BLADES; bid++) {
		bs = gru_base[bid];
		if (!bs)
			continue;

		/* Ignore busy contexts. Don't want to block here. */
		if (down_write_trylock(&bs->bs_kgts_sema)) {
			kgts = bs->bs_kgts;
			if (kgts && kgts->ts_gru)
				gru_unload_context(kgts, 0);
			bs->bs_kgts = NULL;
			up_write(&bs->bs_kgts_sema);
			kfree(kgts);
		} else {
			ret++;
		}
	}
	return ret;
}
/*
 * Lock & load the kernel context for the specified blade.
 */
static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
{
	struct gru_blade_state *bs;

	STAT(lock_kernel_context);
	bs = gru_base[blade_id];

	down_read(&bs->bs_kgts_sema);
	if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
		gru_load_kernel_context(bs, blade_id);
	return bs;
}

/*
 * Unlock the kernel context for the specified blade. Context is not
 * unloaded but may be stolen before next use.
 */
static void gru_unlock_kernel_context(int blade_id)
{
	struct gru_blade_state *bs;

	bs = gru_base[blade_id];
	up_read(&bs->bs_kgts_sema);
	STAT(unlock_kernel_context);
}
/*
 * Reserve & get pointers to the DSR/CBRs reserved for the current cpu.
 *	- returns with preemption disabled
 */
static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
{
	struct gru_blade_state *bs;
	int lcpu;

	BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
	preempt_disable();
	bs = gru_lock_kernel_context(uv_numa_blade_id());
	lcpu = uv_blade_processor_id();
	*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
	*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
	return 0;
}

/*
 * Free the current cpu's reserved DSR/CBR resources.
 */
static void gru_free_cpu_resources(void *cb, void *dsr)
{
	gru_unlock_kernel_context(uv_numa_blade_id());
	preempt_enable();
}
/*
 * Reserve GRU resources to be used asynchronously.
 *   Note: currently supports only 1 reservation per blade.
 *
 *	input:
 *		blade_id  - blade on which resources should be reserved
 *		cbrs	  - number of CBRs
 *		dsr_bytes - number of DSR bytes needed
 *	output:
 *		handle to identify resource
 *		(0 = async resources already reserved)
 */
unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes,
			struct completion *cmp)
{
	struct gru_blade_state *bs;
	struct gru_thread_state *kgts;
	int ret = 0;

	bs = gru_base[blade_id];

	down_write(&bs->bs_kgts_sema);

	/* Verify no resources already reserved */
	if (bs->bs_async_dsr_bytes + bs->bs_async_cbrs)
		goto done;
	bs->bs_async_dsr_bytes = dsr_bytes;
	bs->bs_async_cbrs = cbrs;
	bs->bs_async_wq = cmp;
	kgts = bs->bs_kgts;

	/* Resources changed. Unload context if already loaded */
	if (kgts && kgts->ts_gru)
		gru_unload_context(kgts, 0);
	ret = ASYNC_BID_TO_HAN(blade_id);

done:
	up_write(&bs->bs_kgts_sema);
	return ret;
}
/*
 * Release async resources previously reserved.
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_release_async_resources(unsigned long han)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);

	down_write(&bs->bs_kgts_sema);
	bs->bs_async_dsr_bytes = 0;
	bs->bs_async_cbrs = 0;
	bs->bs_async_wq = NULL;
	up_write(&bs->bs_kgts_sema);
}

/*
 * Wait for async GRU instructions to complete.
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_wait_async_cbr(unsigned long han)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);

	wait_for_completion(bs->bs_async_wq);
	mb();
}

/*
 * Lock previously reserved async GRU resources
 *
 *	input:
 *		han - handle to identify resources
 *	output:
 *		cb  - pointer to first CBR
 *		dsr - pointer to first DSR
 */
void gru_lock_async_resource(unsigned long han, void **cb, void **dsr)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
	int blade_id = ASYNC_HAN_TO_BID(han);
	int ncpus;

	gru_lock_kernel_context(blade_id);
	ncpus = uv_blade_nr_possible_cpus(blade_id);
	if (cb)
		*cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE;
	if (dsr)
		*dsr = bs->kernel_dsr + ncpus * GRU_NUM_KERNEL_DSR_BYTES;
}

/*
 * Unlock previously reserved async GRU resources
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_unlock_async_resource(unsigned long han)
{
	int blade_id = ASYNC_HAN_TO_BID(han);

	gru_unlock_kernel_context(blade_id);
}
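
/*
 * Typical async usage, condensed from quicktest2() below (illustration
 * only; blade_id/ncbrs stand in for caller-chosen values):
 *
 *	static DECLARE_COMPLETION(cmp);
 *	unsigned long han;
 *	void *cb;
 *
 *	han = gru_reserve_async_resources(blade_id, ncbrs, 0, &cmp);
 *	if (!han)
 *		return -EBUSY;		<- someone else holds the reservation
 *	gru_lock_async_resource(han, &cb, NULL);
 *	... issue instructions on cb using IMA_INTERRUPT ...
 *	gru_wait_async_cbr(han);	<- sleep until a CBR interrupts
 *	gru_unlock_async_resource(han);
 *	gru_release_async_resources(han);
 */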
/*----------------------------------------------------------------------*/
int gru_get_cb_exception_detail(void *cb,
		struct control_block_extended_exc_detail *excdet)
{
	struct gru_control_block_extended *cbe;
	struct gru_blade_state *bs;
	int cbrnum;

	bs = KCB_TO_BS(cb);
	cbrnum = thread_cbr_number(bs->bs_kgts, get_cb_number(cb));
	cbe = get_cbe(GRUBASE(cb), cbrnum);
	gru_flush_cache(cbe);	/* CBE not coherent */
	sync_core();
	excdet->opc = cbe->opccpy;
	excdet->exopc = cbe->exopccpy;
	excdet->ecause = cbe->ecause;
	excdet->exceptdet0 = cbe->idef1upd;
	excdet->exceptdet1 = cbe->idef3upd;
	gru_flush_cache(cbe);
	return 0;
}
char *gru_get_cb_exception_detail_str(int ret, void *cb,
				      char *buf, int size)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;

	if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
		gru_get_cb_exception_detail(cb, &excdet);
		snprintf(buf, size,
			"GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x, "
			"excdet0 0x%lx, excdet1 0x%x",
			gen, excdet.opc, excdet.exopc, excdet.ecause,
			excdet.exceptdet0, excdet.exceptdet1);
	} else {
		snprintf(buf, size, "No exception");
	}
	return buf;
}
static int gru_wait_idle_or_exception(struct gru_control_block_status *gen)
{
	while (gen->istatus >= CBS_ACTIVE) {
		cpu_relax();
		barrier();
	}
	return gen->istatus;
}

static int gru_retry_exception(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;
	int retry = EXCEPTION_RETRY_LIMIT;

	while (1) {
		if (gru_wait_idle_or_exception(gen) == CBS_IDLE)
			return CBS_IDLE;
		if (gru_get_cb_message_queue_substatus(cb))
			return CBS_EXCEPTION;
		gru_get_cb_exception_detail(cb, &excdet);
		if ((excdet.ecause & ~EXCEPTION_RETRY_BITS) ||
				(excdet.cbrexecstatus & CBR_EXS_ABORT_OCC))
			break;
		if (retry-- == 0)
			break;
		gen->icmd = 1;
		gru_flush_cache(gen);
	}
	return CBS_EXCEPTION;
}
int gru_check_status_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gen->istatus;
	if (ret == CBS_EXCEPTION)
		ret = gru_retry_exception(cb);
	rmb();
	return ret;
}

int gru_wait_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gru_wait_idle_or_exception(gen);
	if (ret == CBS_EXCEPTION)
		ret = gru_retry_exception(cb);
	rmb();
	return ret;
}

void gru_abort(int ret, void *cb, char *str)
{
	char buf[GRU_EXC_STR_SIZE];

	panic("GRU FATAL ERROR: %s - %s\n", str,
	      gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf)));
}

void gru_wait_abort_proc(void *cb)
{
	int ret;

	ret = gru_wait_proc(cb);
	if (ret)
		gru_abort(ret, cb, "gru_wait_abort");
}
/*------------------------------ MESSAGE QUEUES -----------------------------*/

/* Internal status. These are NOT returned to the user. */
#define MQIE_AGAIN		-1	/* try again */

/*
 * Save/restore the "present" flag that is in the second line of 2-line
 * messages
 */
static inline int get_present2(void *p)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	return mhdr->present;
}

static inline void restore_present2(void *p, int val)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	mhdr->present = val;
}
/*
 * Create a message queue.
 *	qlines - message queue size in cache lines. Includes 2-line header.
 */
int gru_create_message_queue(struct gru_message_queue_desc *mqd,
		void *p, unsigned int bytes, int nasid, int vector, int apicid)
{
	struct message_queue *mq = p;
	unsigned int qlines;

	qlines = bytes / GRU_CACHE_LINE_BYTES - 2;
	memset(mq, 0, bytes);
	mq->start = &mq->data;
	mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
	mq->next = &mq->data;
	mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
	mq->qlines = qlines;
	mq->hstatus[0] = 0;
	mq->hstatus[1] = 1;
	mq->head = gru_mesq_head(2, qlines / 2 + 1);
	mqd->mq = mq;
	mqd->mq_gpa = uv_gpa(mq);
	mqd->qlines = qlines;
	mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid);
	mqd->interrupt_vector = vector;
	mqd->interrupt_apicid = apicid;
	return 0;
}
EXPORT_SYMBOL_GPL(gru_create_message_queue);
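
/*
 * Example (condensed from quicktest1() below): the queue memory must be
 * cacheline aligned and is sized in whole cachelines. Passing 0 for
 * nasid/vector/apicid disables interrupt delivery.
 *
 *	struct gru_message_queue_desc mqd;
 *	void *p = kmalloc(4096, GFP_KERNEL);
 *	void *mq = ALIGNUP(p, 1024);	ALIGNUP is defined later in this file
 *
 *	gru_create_message_queue(&mqd, mq, 8 * GRU_CACHE_LINE_BYTES, 0, 0, 0);
 */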
/*
 * Send a NOOP message to a message queue
 *	Returns:
 *		 0 - if queue is full after the send. This is the normal case
 *		     but various races can change this.
 *		-1 - if mesq sent successfully but queue not full
 *		>0 - unexpected error. MQE_xxx returned
 */
static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg)
{
	const struct message_header noop_header = {
					.present = MQS_NOOP, .lines = 1};
	unsigned long m;
	int substatus, ret;
	struct message_header save_mhdr, *mhdr = mesg;

	STAT(mesq_noop);
	save_mhdr = *mhdr;
	*mhdr = noop_header;
	gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), 1, IMA);
	ret = gru_wait(cb);

	if (ret) {
		substatus = gru_get_cb_message_queue_substatus(cb);
		switch (substatus) {
		case CBSS_NO_ERROR:
			STAT(mesq_noop_unexpected_error);
			ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_LB_OVERFLOWED:
			STAT(mesq_noop_lb_overflow);
			ret = MQE_CONGESTION;
			break;
		case CBSS_QLIMIT_REACHED:
			STAT(mesq_noop_qlimit_reached);
			ret = 0;
			break;
		case CBSS_AMO_NACKED:
			STAT(mesq_noop_amo_nacked);
			ret = MQE_CONGESTION;
			break;
		case CBSS_PUT_NACKED:
			STAT(mesq_noop_put_nacked);
			m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
			gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
						IMA);
			if (gru_wait(cb) == CBS_IDLE)
				ret = MQIE_AGAIN;
			else
				ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_PAGE_OVERFLOW:
		default:
			BUG();
		}
	}
	*mhdr = save_mhdr;
	return ret;
}
/*
 * Handle a gru_mesq full.
 */
static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	union gru_mesqhead mqh;
	unsigned int limit, head;
	unsigned long avalue;
	int half, qlines;

	/* Determine if switching to first/second half of q */
	avalue = gru_get_amo_value(cb);
	head = gru_get_amo_value_head(cb);
	limit = gru_get_amo_value_limit(cb);

	qlines = mqd->qlines;
	half = (limit != qlines);

	if (half)
		mqh = gru_mesq_head(qlines / 2 + 1, qlines);
	else
		mqh = gru_mesq_head(2, qlines / 2 + 1);

	/* Try to get lock for switching head pointer */
	gru_gamir(cb, EOP_IR_CLR, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;
	if (!gru_get_amo_value(cb)) {
		STAT(mesq_qf_locked);
		return MQE_QUEUE_FULL;
	}

	/* Got the lock. Send optional NOP if queue not full, */
	if (head != limit) {
		if (send_noop_message(cb, mqd, mesg)) {
			gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half),
					XTYPE_DW, IMA);
			if (gru_wait(cb) != CBS_IDLE)
				goto cberr;
			STAT(mesq_qf_noop_not_full);
			return MQIE_AGAIN;
		}
		avalue++;
	}

	/* Then flip queuehead to other half of queue. */
	gru_gamer(cb, EOP_ERR_CSWAP, mqd->mq_gpa, XTYPE_DW, mqh.val, avalue,
							IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;

	/* If swapping the queue head failed, clear the hstatus lock */
	if (gru_get_amo_value(cb) != avalue) {
		STAT(mesq_qf_switch_head_failed);
		gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), XTYPE_DW,
							IMA);
		if (gru_wait(cb) != CBS_IDLE)
			goto cberr;
	}
	return MQIE_AGAIN;
cberr:
	STAT(mesq_qf_unexpected_error);
	return MQE_UNEXPECTED_CB_ERR;
}
/*
 * Send a cross-partition interrupt to the SSI that contains the target
 * message queue. Normally, the interrupt is automatically delivered by hardware
 * but some error conditions require explicit delivery.
 */
static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
{
	if (mqd->interrupt_vector)
		uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid,
				mqd->interrupt_vector);
}

/*
 * Handle a PUT failure. Note: if message was a 2-line message, one of the
 * lines might have been successfully written. Before sending the
 * message, "present" must be cleared in BOTH lines to prevent the receiver
 * from prematurely seeing the full message.
 */
static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
			void *mesg, int lines)
{
	unsigned long m;

	m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
	if (lines == 2) {
		gru_vset(cb, m, 0, XTYPE_CL, lines, 1, IMA);
		if (gru_wait(cb) != CBS_IDLE)
			return MQE_UNEXPECTED_CB_ERR;
	}
	gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		return MQE_UNEXPECTED_CB_ERR;
	send_message_queue_interrupt(mqd);
	return MQE_OK;
}
/*
 * Handle a gru_mesq failure. Some of these failures are software recoverable
 * or retryable.
 */
static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	int substatus, ret = 0;

	substatus = gru_get_cb_message_queue_substatus(cb);
	switch (substatus) {
	case CBSS_NO_ERROR:
		STAT(mesq_send_unexpected_error);
		ret = MQE_UNEXPECTED_CB_ERR;
		break;
	case CBSS_LB_OVERFLOWED:
		STAT(mesq_send_lb_overflow);
		ret = MQE_CONGESTION;
		break;
	case CBSS_QLIMIT_REACHED:
		STAT(mesq_send_qlimit_reached);
		ret = send_message_queue_full(cb, mqd, mesg, lines);
		break;
	case CBSS_AMO_NACKED:
		STAT(mesq_send_amo_nacked);
		ret = MQE_CONGESTION;
		break;
	case CBSS_PUT_NACKED:
		STAT(mesq_send_put_nacked);
		ret = send_message_put_nacked(cb, mqd, mesg, lines);
		break;
	default:
		BUG();
	}
	return ret;
}
/*
 * Send a message to a message queue
 *	mqd	message queue descriptor
 *	mesg	message. Must be a vaddr within a GSEG
 *	bytes	message size (<= 2 CL)
 */
int gru_send_message_gpa(struct gru_message_queue_desc *mqd, void *mesg,
			 unsigned int bytes)
{
	struct message_header *mhdr;
	void *cb;
	void *dsr;
	int istatus, clines, ret;

	STAT(mesq_send);
	BUG_ON(bytes < sizeof(int) || bytes > 2 * GRU_CACHE_LINE_BYTES);

	clines = DIV_ROUND_UP(bytes, GRU_CACHE_LINE_BYTES);
	if (gru_get_cpu_resources(bytes, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	memcpy(dsr, mesg, bytes);
	mhdr = dsr;
	mhdr->present = MQS_FULL;
	mhdr->lines = clines;
	if (clines == 2) {
		mhdr->present2 = get_present2(mhdr);
		restore_present2(mhdr, MQS_FULL);
	}

	do {
		ret = MQE_OK;
		gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), clines, IMA);
		istatus = gru_wait(cb);
		if (istatus != CBS_IDLE)
			ret = send_message_failure(cb, mqd, dsr, clines);
	} while (ret == MQIE_AGAIN);
	gru_free_cpu_resources(cb, dsr);

	if (ret)
		STAT(mesq_send_failed);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_send_message_gpa);
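
/*
 * Example send loop (from quicktest1() below). MQE_CONGESTION is a
 * transient condition worth retrying; MQE_QUEUE_FULL is not:
 *
 *	do {
 *		ret = gru_send_message_gpa(&mqd, mes, sizeof(mes));
 *	} while (ret == MQE_CONGESTION);
 */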
/*
 * Advance the receive pointer for the queue to the next message.
 */
void gru_free_message(struct gru_message_queue_desc *mqd, void *mesg)
{
	struct message_queue *mq = mqd->mq;
	struct message_header *mhdr = mq->next;
	void *next, *pnext;
	int half = -1;
	int lines = mhdr->lines;

	if (lines == 2)
		restore_present2(mhdr, MQS_EMPTY);
	mhdr->present = MQS_EMPTY;

	pnext = mq->next;
	next = pnext + GRU_CACHE_LINE_BYTES * lines;
	if (next == mq->limit) {
		next = mq->start;
		half = 1;
	} else if (pnext < mq->start2 && next >= mq->start2) {
		half = 0;
	}

	if (half >= 0)
		mq->hstatus[half] = 1;
	mq->next = next;
}
EXPORT_SYMBOL_GPL(gru_free_message);
/*
 * Get next message from message queue. Return NULL if no message
 * present. Caller must call gru_free_message() to move to the next
 * message.
 *	mqd	message queue descriptor
 */
void *gru_get_next_message(struct gru_message_queue_desc *mqd)
{
	struct message_queue *mq = mqd->mq;
	struct message_header *mhdr = mq->next;
	int present = mhdr->present;

	/* skip NOOP messages */
	STAT(mesq_receive);
	while (present == MQS_NOOP) {
		gru_free_message(mqd, mhdr);
		mhdr = mq->next;
		present = mhdr->present;
	}

	/* Wait for both halves of 2 line messages */
	if (present == MQS_FULL && mhdr->lines == 2 &&
				get_present2(mhdr) == MQS_EMPTY)
		present = MQS_EMPTY;

	if (!present) {
		STAT(mesq_receive_none);
		return NULL;
	}

	if (mhdr->lines == 2)
		restore_present2(mhdr, mhdr->present2);

	return mhdr;
}
EXPORT_SYMBOL_GPL(gru_get_next_message);
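
/*
 * Example receive loop (illustration only; "m" and the consume step are
 * caller-specific):
 *
 *	void *m;
 *
 *	while ((m = gru_get_next_message(&mqd)) != NULL) {
 *		... consume message m ...
 *		gru_free_message(&mqd, m);
 *	}
 */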
/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/

/*
 * Load a DW from a global GPA. The GPA can be a memory or MMR address.
 */
int gru_read_gpa(unsigned long *value, unsigned long gpa)
{
	void *cb;
	void *dsr;
	int ret, iaa;

	STAT(read_gpa);
	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	iaa = gpa >> 62;	/* address space (IAA) is in the upper 2 GPA bits */
	gru_vload_phys(cb, gpa, gru_get_tri(dsr), iaa, IMA);
	ret = gru_wait(cb);
	if (ret == CBS_IDLE)
		*value = *(unsigned long *)dsr;
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_read_gpa);
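
/*
 * Example (illustration only; "remote_gpa" is a hypothetical global
 * address obtained elsewhere, e.g. via uv_gpa()):
 *
 *	unsigned long val;
 *
 *	if (gru_read_gpa(&val, remote_gpa))
 *		... handle GRU failure (nonzero CB status) ...
 */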
/*
 * Copy a block of data using the GRU resources
 */
int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
				unsigned int bytes)
{
	void *cb;
	void *dsr;
	int ret;

	STAT(copy_gpa);
	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
		  XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_CL, IMA);
	ret = gru_wait(cb);
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_copy_gpa);
/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
/* Temp - will delete after we gain confidence in the GRU */

static int quicktest0(unsigned long arg)
{
	unsigned long word0;
	unsigned long word1;
	void *cb;
	void *dsr;
	unsigned long *p;
	int ret = -EIO;

	if (gru_get_cpu_resources(GRU_CACHE_LINE_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	p = dsr;
	word0 = MAGIC;
	word1 = 0;

	gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE) {
		printk(KERN_DEBUG "GRU quicktest0: CBR failure 1\n");
		goto done;
	}

	if (*p != MAGIC) {
		printk(KERN_DEBUG "GRU: quicktest0 bad magic 0x%lx\n", *p);
		goto done;
	}
	gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE) {
		printk(KERN_DEBUG "GRU quicktest0: CBR failure 2\n");
		goto done;
	}

	if (word0 != word1 || word1 != MAGIC) {
		printk(KERN_DEBUG
		       "GRU quicktest0 err: found 0x%lx, expected 0x%lx\n",
		       word1, MAGIC);
		goto done;
	}
	ret = 0;

done:
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
#define ALIGNUP(p, q)	((void *)(((unsigned long)(p) + (q) - 1) & ~((q) - 1)))

static int quicktest1(unsigned long arg)
{
	struct gru_message_queue_desc mqd;
	void *p, *mq;
	unsigned long *dw;
	int i, ret = -EIO;
	char mes[GRU_CACHE_LINE_BYTES], *m;

	/* Need 1K cacheline aligned that does not cross page boundary */
	p = kmalloc(4096, GFP_KERNEL);
	if (p == NULL)
		return -ENOMEM;
	mq = ALIGNUP(p, 1024);
	memset(mes, 0xee, sizeof(mes));
	dw = mq;

	gru_create_message_queue(&mqd, mq, 8 * GRU_CACHE_LINE_BYTES, 0, 0, 0);
	for (i = 0; i < 6; i++) {
		mes[8] = i;
		do {
			ret = gru_send_message_gpa(&mqd, mes, sizeof(mes));
		} while (ret == MQE_CONGESTION);
		if (ret)
			break;
	}
	if (ret != MQE_QUEUE_FULL || i != 4)
		goto done;

	for (i = 0; i < 6; i++) {
		m = gru_get_next_message(&mqd);
		if (!m || m[8] != i)
			break;
		gru_free_message(&mqd, m);
	}
	ret = (i == 4) ? 0 : -EIO;

done:
	kfree(p);
	return ret;
}
static int quicktest2(unsigned long arg)
{
	static DECLARE_COMPLETION(cmp);
	unsigned long han;
	int blade_id = 0;
	int numcb = 4;
	int ret = 0;
	unsigned long *buf;
	void *cb0, *cb;
	struct gru_control_block_status *gen;
	int i, k, istatus, bytes;

	bytes = numcb * 4 * 8;
	buf = kmalloc(bytes, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = -EBUSY;
	han = gru_reserve_async_resources(blade_id, numcb, 0, &cmp);
	if (!han)
		goto done;

	gru_lock_async_resource(han, &cb0, NULL);
	memset(buf, 0xee, bytes);
	for (i = 0; i < numcb; i++)
		gru_vset(cb0 + i * GRU_HANDLE_STRIDE, uv_gpa(&buf[i * 4]), 0,
				XTYPE_DW, 4, 1, IMA_INTERRUPT);

	ret = 0;
	k = numcb;
	do {
		gru_wait_async_cbr(han);
		for (i = 0; i < numcb; i++) {
			cb = cb0 + i * GRU_HANDLE_STRIDE;
			istatus = gru_check_status(cb);
			if (istatus != CBS_ACTIVE && istatus != CBS_CALL_OS)
				break;
		}
		if (i == numcb)
			continue;
		if (istatus != CBS_IDLE) {
			printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, exception\n",
			       smp_processor_id(), i);
			ret = -EFAULT;
		} else if (buf[4 * i] || buf[4 * i + 1] || buf[4 * i + 2] ||
				buf[4 * i + 3]) {
			printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, buf 0x%lx, 0x%lx, 0x%lx, 0x%lx\n",
			       smp_processor_id(), i, buf[4 * i],
			       buf[4 * i + 1], buf[4 * i + 2], buf[4 * i + 3]);
			ret = -EIO;
		}
		k--;
		gen = cb;
		gen->istatus = CBS_CALL_OS; /* don't handle this CBR again */
	} while (k);
	BUG_ON(cmp.done);

	gru_unlock_async_resource(han);
	gru_release_async_resources(han);
done:
	kfree(buf);
	return ret;
}
#define BUFSIZE 200
static int quicktest3(unsigned long arg)
{
	char buf1[BUFSIZE], buf2[BUFSIZE];
	int ret = 0;

	memset(buf2, 0, sizeof(buf2));
	memset(buf1, get_cycles() & 255, sizeof(buf1));
	gru_copy_gpa(uv_gpa(buf2), uv_gpa(buf1), BUFSIZE);
	if (memcmp(buf1, buf2, BUFSIZE)) {
		printk(KERN_DEBUG "GRU quicktest3 error\n");
		ret = -EIO;
	}
	return ret;
}
/*
 * Debugging only. User hook for various kernel tests
 * of driver & gru.
 */
int gru_ktest(unsigned long arg)
{
	int ret = -EINVAL;

	switch (arg & 0xff) {
	case 0:
		ret = quicktest0(arg);
		break;
	case 1:
		ret = quicktest1(arg);
		break;
	case 2:
		ret = quicktest2(arg);
		break;
	case 3:
		ret = quicktest3(arg);
		break;
	case 99:
		ret = gru_free_kernel_contexts();
		break;
	}
	return ret;
}
int gru_kservices_init(void)
{
	return 0;
}

void gru_kservices_exit(void)
{
	if (gru_free_kernel_contexts())
		BUG();
}