/*
 * SN Platform GRU Driver
 *
 * KERNEL SERVICES THAT USE THE GRU
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
#include "grukservices.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/*
 * Kernel GRU Usage
 *
 * The following is an interim algorithm for management of kernel GRU
 * resources. This will likely be replaced when we better understand the
 * kernel/user requirements.
 *
 * Blade percpu resources are reserved for kernel use. These resources are
 * reserved whenever the kernel context for the blade is loaded. Note
 * that the kernel context is not guaranteed to be always available. It is
 * loaded on demand & can be stolen by a user if the user demand exceeds the
 * kernel demand. The kernel can always reload the kernel context but
 * a SLEEP may be required!!!
 *
 * Async Overview:
 *
 *	Each blade has one "kernel context" that owns GRU kernel resources
 *	located on the blade. Kernel drivers use GRU resources in this context
 *	for sending messages, zeroing memory, etc.
 *
 *	The kernel context is dynamically loaded on demand. If it is not in
 *	use by the kernel, the kernel context can be unloaded & given to a user.
 *	The kernel context will be reloaded when needed. This may require that
 *	a context be stolen from a user.
 *		NOTE: frequent unloading/reloading of the kernel context is
 *		expensive. We are depending on batch schedulers, cpusets, sane
 *		drivers or some other mechanism to prevent the need for frequent
 *		stealing/reloading.
 *
 *	The kernel context consists of two parts:
 *		- 1 CB & a few DSRs that are reserved for each cpu on the blade.
 *		  Each cpu has its own private resources & does not share them
 *		  with other cpus. These resources are used serially, ie,
 *		  locked, used & unlocked on each call to a function in
 *		  grukservices.
 *			(Now that we have dynamic loading of kernel contexts, I
 *			 may rethink this & allow sharing between cpus....)
 *
 *		- Additional resources can be reserved long term & used directly
 *		  by UV drivers located in the kernel. Drivers using these GRU
 *		  resources can use asynchronous GRU instructions that send
 *		  interrupts on completion.
 *			- these resources must be explicitly locked/unlocked
 *			- locked resources prevent (obviously) the kernel
 *			  context from being unloaded.
 *			- drivers using these resources directly issue their own
 *			  GRU instruction and must wait/check completion.
 *
 *		  When these resources are reserved, the caller can optionally
 *		  associate a wait_queue with the resources and use asynchronous
 *		  GRU instructions. When an async GRU instruction completes, the
 *		  driver will do a wakeup on the event; a usage sketch follows.
 */

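/*
 * Illustrative sketch only (not compiled): one way a UV kernel driver
 * might use the async interfaces described above. The names my_cmp,
 * my_cb and my_dsr are hypothetical placeholders, not part of this
 * driver:
 *
 *	static DECLARE_COMPLETION(my_cmp);
 *	unsigned long han;
 *	void *my_cb, *my_dsr;
 *
 *	han = gru_reserve_async_resources(blade_id, 2, 128, &my_cmp);
 *	if (!han)
 *		return -EBUSY;	(resources already reserved on this blade)
 *	gru_lock_async_resource(han, &my_cb, &my_dsr);
 *	(... issue GRU instructions on my_cb using IMA_INTERRUPT ...)
 *	gru_wait_async_cbr(han);	(woken by the completion interrupt)
 *	gru_unlock_async_resource(han);
 *	gru_release_async_resources(han);
 */
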
#define ASYNC_HAN_TO_BID(h)	((h) - 1)
#define ASYNC_BID_TO_HAN(b)	((b) + 1)
#define ASYNC_HAN_TO_BS(h)	gru_base[ASYNC_HAN_TO_BID(h)]

#define GRU_NUM_KERNEL_CBR	1
#define GRU_NUM_KERNEL_DSR_BYTES 256
#define GRU_NUM_KERNEL_DSR_CL	(GRU_NUM_KERNEL_DSR_BYTES /		\
					GRU_CACHE_LINE_BYTES)

/* GRU instruction attributes for all instructions */
#define IMA			IMA_CB_DELAY

/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */
#define __gru_cacheline_aligned__				\
	__attribute__((__aligned__(GRU_CACHE_LINE_BYTES)))

#define MAGIC	0x1234567887654321UL

/* Default retry count for GRU errors on kernel instructions */
#define EXCEPTION_RETRY_LIMIT	3

/* Status of message queue sections */
#define MQS_EMPTY		0
#define MQS_FULL		1
#define MQS_NOOP		2

/*----------------- RESOURCE MANAGEMENT -------------------------------------*/
/* optimized for x86_64 */
struct message_queue {
	union gru_mesqhead	head __gru_cacheline_aligned__;	/* CL 0 */
	int			qlines;				/* DW 1 */
	long			hstatus[2];
	void			*next __gru_cacheline_aligned__;/* CL 1 */
	void			*limit;
	void			*start;
	void			*start2;
	char			data ____cacheline_aligned;	/* CL 2 */
};

/* First word in every message - used by mesq interface */
struct message_header {
	char	present;
	char	present2;
	char	lines;
	char	fill;
};

#define HSTATUS(mq, h)	((mq) + offsetof(struct message_queue, hstatus[h]))

/*
 * Reload the blade's kernel context into a GRU chiplet. Called holding
 * the bs_kgts_sema for READ. Will steal user contexts if necessary.
 */
static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
{
	struct gru_state *gru;
	struct gru_thread_state *kgts;
	void *vaddr;
	int ctxnum, ncpus;

	up_read(&bs->bs_kgts_sema);
	down_write(&bs->bs_kgts_sema);

	if (!bs->bs_kgts) {
		bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0, 0);
		bs->bs_kgts->ts_user_blade_id = blade_id;
	}
	kgts = bs->bs_kgts;

	if (!kgts->ts_gru) {
		STAT(load_kernel_context);
		ncpus = uv_blade_nr_possible_cpus(blade_id);
		kgts->ts_cbr_au_count = GRU_CB_COUNT_TO_AU(
			GRU_NUM_KERNEL_CBR * ncpus + bs->bs_async_cbrs);
		kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
			GRU_NUM_KERNEL_DSR_BYTES * ncpus +
				bs->bs_async_dsr_bytes);
		while (!gru_assign_gru_context(kgts)) {
			msleep(1);
			gru_steal_context(kgts);
		}
		gru_load_context(kgts);
		gru = bs->bs_kgts->ts_gru;
		vaddr = gru->gs_gru_base_vaddr;
		ctxnum = kgts->ts_ctxnum;
		bs->kernel_cb = get_gseg_base_address_cb(vaddr, ctxnum, 0);
		bs->kernel_dsr = get_gseg_base_address_ds(vaddr, ctxnum, 0);
	}
	downgrade_write(&bs->bs_kgts_sema);
}

/*
 * Free all kernel contexts that are not currently in use.
 *   Returns 0 if all freed, else the number of in-use contexts.
 */
static int gru_free_kernel_contexts(void)
{
	struct gru_blade_state *bs;
	struct gru_thread_state *kgts;
	int bid, ret = 0;

	for (bid = 0; bid < GRU_MAX_BLADES; bid++) {
		bs = gru_base[bid];
		if (!bs)
			continue;

		/* Ignore busy contexts. Don't want to block here. */
		if (down_write_trylock(&bs->bs_kgts_sema)) {
			kgts = bs->bs_kgts;
			if (kgts && kgts->ts_gru)
				gru_unload_context(kgts, 0);
			bs->bs_kgts = NULL;
			up_write(&bs->bs_kgts_sema);
			kfree(kgts);
		} else {
			ret++;
		}
	}
	return ret;
}

/*
 * Lock & load the kernel context for the specified blade.
 */
static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
{
	struct gru_blade_state *bs;
	int bid;

	STAT(lock_kernel_context);
again:
	bid = blade_id < 0 ? uv_numa_blade_id() : blade_id;
	bs = gru_base[bid];

	/* Handle the case where migration occurred while waiting for the sema */
	down_read(&bs->bs_kgts_sema);
	if (blade_id < 0 && bid != uv_numa_blade_id()) {
		up_read(&bs->bs_kgts_sema);
		goto again;
	}
	if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
		gru_load_kernel_context(bs, bid);
	return bs;
}

/*
 * Unlock the kernel context for the specified blade. Context is not
 * unloaded but may be stolen before next use.
 */
static void gru_unlock_kernel_context(int blade_id)
{
	struct gru_blade_state *bs;

	bs = gru_base[blade_id];
	up_read(&bs->bs_kgts_sema);
	STAT(unlock_kernel_context);
}

/*
 * Reserve & get pointers to the DSR/CBRs reserved for the current cpu.
 *	- returns with preemption disabled
 */
static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
{
	struct gru_blade_state *bs;
	int lcpu;

	BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
	preempt_disable();
	bs = gru_lock_kernel_context(-1);
	lcpu = uv_blade_processor_id();
	*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
	*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
	return 0;
}

/*
 * Free the current cpu's reserved DSR/CBR resources.
 */
static void gru_free_cpu_resources(void *cb, void *dsr)
{
	gru_unlock_kernel_context(uv_numa_blade_id());
	preempt_enable();
}

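/*
 * Sketch of the per-cpu resource pattern used by the kservices below;
 * gru_do_kernel_work() is a hypothetical stand-in for a real operation
 * such as gru_mesq() or gru_bcopy() followed by gru_wait():
 *
 *	void *cb, *dsr;
 *
 *	if (gru_get_cpu_resources(bytes, &cb, &dsr))
 *		return MQE_BUG_NO_RESOURCES;
 *	gru_do_kernel_work(cb, dsr);
 *	gru_free_cpu_resources(cb, dsr);
 */
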
/*
 * Reserve GRU resources to be used asynchronously.
 *   Note: currently supports only 1 reservation per blade.
 *
 *	input:
 *		blade_id  - blade on which resources should be reserved
 *		cbrs	  - number of CBRs
 *		dsr_bytes - number of DSR bytes needed
 *	output:
 *		handle to identify resource
 *		(0 = async resources already reserved)
 */
unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes,
			struct completion *cmp)
{
	struct gru_blade_state *bs;
	struct gru_thread_state *kgts;
	int ret = 0;

	bs = gru_base[blade_id];

	down_write(&bs->bs_kgts_sema);

	/* Verify no resources already reserved */
	if (bs->bs_async_dsr_bytes + bs->bs_async_cbrs)
		goto done;
	bs->bs_async_dsr_bytes = dsr_bytes;
	bs->bs_async_cbrs = cbrs;
	bs->bs_async_wq = cmp;
	kgts = bs->bs_kgts;

	/* Resources changed. Unload context if already loaded */
	if (kgts && kgts->ts_gru)
		gru_unload_context(kgts, 0);
	ret = ASYNC_BID_TO_HAN(blade_id);

done:
	up_write(&bs->bs_kgts_sema);
	return ret;
}

/*
 * Release async resources previously reserved.
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_release_async_resources(unsigned long han)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);

	down_write(&bs->bs_kgts_sema);
	bs->bs_async_dsr_bytes = 0;
	bs->bs_async_cbrs = 0;
	bs->bs_async_wq = NULL;
	up_write(&bs->bs_kgts_sema);
}

/*
 * Wait for async GRU instructions to complete.
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_wait_async_cbr(unsigned long han)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);

	wait_for_completion(bs->bs_async_wq);
	mb();
}

/*
 * Lock previously reserved async GRU resources
 *
 *	input:
 *		han - handle to identify resources
 *	output:
 *		cb  - pointer to first CBR
 *		dsr - pointer to first DSR
 */
void gru_lock_async_resource(unsigned long han, void **cb, void **dsr)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
	int blade_id = ASYNC_HAN_TO_BID(han);
	int ncpus;

	gru_lock_kernel_context(blade_id);
	ncpus = uv_blade_nr_possible_cpus(blade_id);
	if (cb)
		*cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE;
	if (dsr)
		*dsr = bs->kernel_dsr + ncpus * GRU_NUM_KERNEL_DSR_BYTES;
}

/*
 * Unlock previously reserved async GRU resources
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_unlock_async_resource(unsigned long han)
{
	int blade_id = ASYNC_HAN_TO_BID(han);

	gru_unlock_kernel_context(blade_id);
}

/*----------------------------------------------------------------------*/
int gru_get_cb_exception_detail(void *cb,
		struct control_block_extended_exc_detail *excdet)
{
	struct gru_control_block_extended *cbe;
	struct gru_thread_state *kgts = NULL;
	unsigned long off;
	int cbrnum, bid;

	/*
	 * Locate kgts for cb. This algorithm is SLOW but
	 * this function is rarely called (i.e., almost never).
	 * Performance does not matter.
	 */
	for_each_possible_blade(bid) {
		if (!gru_base[bid])
			break;
		kgts = gru_base[bid]->bs_kgts;
		if (!kgts || !kgts->ts_gru)
			continue;
		off = cb - kgts->ts_gru->gs_gru_base_vaddr;
		if (off < GRU_SIZE)
			break;
		kgts = NULL;
	}
	BUG_ON(!kgts);
	cbrnum = thread_cbr_number(kgts, get_cb_number(cb));
	cbe = get_cbe(GRUBASE(cb), cbrnum);
	gru_flush_cache(cbe);	/* CBE not coherent */
	sync_core();
	excdet->opc = cbe->opccpy;
	excdet->exopc = cbe->exopccpy;
	excdet->ecause = cbe->ecause;
	excdet->exceptdet0 = cbe->idef1upd;
	excdet->exceptdet1 = cbe->idef3upd;
	gru_flush_cache(cbe);
	return 0;
}

char *gru_get_cb_exception_detail_str(int ret, void *cb,
				      char *buf, int size)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;

	if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
		gru_get_cb_exception_detail(cb, &excdet);
		snprintf(buf, size,
			"GRU:%d exception: cb %p, opc %d, exopc %d, ecause 0x%x, "
			"excdet0 0x%lx, excdet1 0x%x", smp_processor_id(),
			gen, excdet.opc, excdet.exopc, excdet.ecause,
			excdet.exceptdet0, excdet.exceptdet1);
	} else {
		snprintf(buf, size, "No exception");
	}
	return buf;
}

static int gru_wait_idle_or_exception(struct gru_control_block_status *gen)
{
	while (gen->istatus >= CBS_ACTIVE) {
		cpu_relax();
		barrier();
	}
	return gen->istatus;
}

static int gru_retry_exception(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;
	int retry = EXCEPTION_RETRY_LIMIT;

	while (1) {
		if (gru_wait_idle_or_exception(gen) == CBS_IDLE)
			return CBS_IDLE;
		if (gru_get_cb_message_queue_substatus(cb))
			return CBS_EXCEPTION;
		gru_get_cb_exception_detail(cb, &excdet);
		if ((excdet.ecause & ~EXCEPTION_RETRY_BITS) ||
				(excdet.cbrexecstatus & CBR_EXS_ABORT_OCC))
			break;
		if (retry-- == 0)
			break;
		gen->icmd = 1;
		gru_flush_cache(gen);
	}
	return CBS_EXCEPTION;
}

int gru_check_status_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gen->istatus;
	if (ret == CBS_EXCEPTION)
		ret = gru_retry_exception(cb);
	rmb();
	return ret;
}

int gru_wait_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gru_wait_idle_or_exception(gen);
	if (ret == CBS_EXCEPTION)
		ret = gru_retry_exception(cb);
	rmb();
	return ret;
}

void gru_abort(int ret, void *cb, char *str)
{
	char buf[GRU_EXC_STR_SIZE];

	panic("GRU FATAL ERROR: %s - %s\n", str,
	      gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf)));
}

void gru_wait_abort_proc(void *cb)
{
	int ret;

	ret = gru_wait_proc(cb);
	if (ret)
		gru_abort(ret, cb, "gru_wait_abort");
}

/*------------------------------ MESSAGE QUEUES -----------------------------*/

/* Internal status. These are NOT returned to the user. */
#define MQIE_AGAIN		-1	/* try again */

/*
 * Save/restore the "present" flag that is in the second line of 2-line
 * messages
 */
static inline int get_present2(void *p)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	return mhdr->present;
}

static inline void restore_present2(void *p, int val)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	mhdr->present = val;
}

/*
 * Create a message queue.
 *	qlines - message queue size in cache lines. Includes 2-line header.
 */
int gru_create_message_queue(struct gru_message_queue_desc *mqd,
		void *p, unsigned int bytes, int nasid, int vector, int apicid)
{
	struct message_queue *mq = p;
	unsigned int qlines;

	qlines = bytes / GRU_CACHE_LINE_BYTES - 2;
	memset(mq, 0, bytes);
	mq->start = &mq->data;
	mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
	mq->next = &mq->data;
	mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
	mq->qlines = qlines;
	mq->hstatus[0] = 0;
	mq->hstatus[1] = 1;
	mq->head = gru_mesq_head(2, qlines / 2 + 1);
	mqd->mq = mq;
	mqd->mq_gpa = uv_gpa(mq);
	mqd->qlines = qlines;
	mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid);
	mqd->interrupt_vector = vector;
	mqd->interrupt_apicid = apicid;
	return 0;
}
EXPORT_SYMBOL_GPL(gru_create_message_queue);

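/*
 * Worked example of the resulting layout (illustrative values only):
 * for an 8-cacheline buffer, as quicktest1() below uses,
 * qlines = 8 - 2 = 6:
 *
 *	start  = &data			(first half of payload)
 *	start2 = &data + 2 * 64		(second half of payload)
 *	limit  = &data + 4 * 64
 *	head   = gru_mesq_head(2, 4)
 *
 * The two hstatus words and the head encoding let the sender flip
 * between queue halves when one half fills; see
 * send_message_queue_full() below.
 */
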
/*
 * Send a NOOP message to a message queue
 *	Returns:
 *		 0 - if queue is full after the send. This is the normal case
 *		     but various races can change this.
 *		-1 - if mesq sent successfully but queue not full
 *		>0 - unexpected error. MQE_xxx returned
 */
static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg)
{
	const struct message_header noop_header = {
					.present = MQS_NOOP, .lines = 1};
	unsigned long m;
	int substatus, ret;
	struct message_header save_mhdr, *mhdr = mesg;

	STAT(mesq_noop);
	save_mhdr = *mhdr;
	*mhdr = noop_header;
	gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), 1, IMA);
	ret = gru_wait(cb);

	if (ret) {
		substatus = gru_get_cb_message_queue_substatus(cb);
		switch (substatus) {
		case CBSS_NO_ERROR:
			STAT(mesq_noop_unexpected_error);
			ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_LB_OVERFLOWED:
			STAT(mesq_noop_lb_overflow);
			ret = MQE_CONGESTION;
			break;
		case CBSS_QLIMIT_REACHED:
			STAT(mesq_noop_qlimit_reached);
			ret = 0;
			break;
		case CBSS_AMO_NACKED:
			STAT(mesq_noop_amo_nacked);
			ret = MQE_CONGESTION;
			break;
		case CBSS_PUT_NACKED:
			STAT(mesq_noop_put_nacked);
			m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
			gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
						IMA);
			if (gru_wait(cb) == CBS_IDLE)
				ret = MQIE_AGAIN;
			else
				ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_PAGE_OVERFLOW:
			STAT(mesq_noop_page_overflow);
			/* fallthru */
		default:
			BUG();
		}
	}
	*mhdr = save_mhdr;
	return ret;
}

/*
 * Handle a gru_mesq full.
 */
static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	union gru_mesqhead mqh;
	unsigned int limit, head;
	unsigned long avalue;
	int half, qlines;

	/* Determine if switching to first/second half of q */
	avalue = gru_get_amo_value(cb);
	head = gru_get_amo_value_head(cb);
	limit = gru_get_amo_value_limit(cb);

	qlines = mqd->qlines;
	half = (limit != qlines);

	if (half)
		mqh = gru_mesq_head(qlines / 2 + 1, qlines);
	else
		mqh = gru_mesq_head(2, qlines / 2 + 1);

	/* Try to get lock for switching head pointer */
	gru_gamir(cb, EOP_IR_CLR, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;
	if (!gru_get_amo_value(cb)) {
		STAT(mesq_qf_locked);
		return MQE_QUEUE_FULL;
	}

	/* Got the lock. Send optional NOOP if queue not full. */
	if (head != limit) {
		if (send_noop_message(cb, mqd, mesg)) {
			gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half),
					XTYPE_DW, IMA);
			if (gru_wait(cb) != CBS_IDLE)
				goto cberr;
			STAT(mesq_qf_noop_not_full);
			return MQIE_AGAIN;
		}
		avalue++;
	}

	/* Then flip queuehead to other half of queue. */
	gru_gamer(cb, EOP_ERR_CSWAP, mqd->mq_gpa, XTYPE_DW, mqh.val, avalue,
			IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;

	/* If the swap of the queue head failed, clear the hstatus lock */
	if (gru_get_amo_value(cb) != avalue) {
		STAT(mesq_qf_switch_head_failed);
		gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), XTYPE_DW,
				IMA);
		if (gru_wait(cb) != CBS_IDLE)
			goto cberr;
	}
	return MQIE_AGAIN;
cberr:
	STAT(mesq_qf_unexpected_error);
	return MQE_UNEXPECTED_CB_ERR;
}

/*
 * Send a cross-partition interrupt to the SSI that contains the target
 * message queue. Normally, the interrupt is automatically delivered by hardware
 * but some error conditions require explicit delivery.
 */
static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
{
	if (mqd->interrupt_vector)
		uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid,
				mqd->interrupt_vector);
}

/*
 * Handle a PUT failure. Note: if message was a 2-line message, one of the
 * lines might have been successfully written. Before sending the
 * message, "present" must be cleared in BOTH lines to prevent the receiver
 * from prematurely seeing the full message.
 */
static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	unsigned long m;

	m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
	if (lines == 2) {
		gru_vset(cb, m, 0, XTYPE_CL, lines, 1, IMA);
		if (gru_wait(cb) != CBS_IDLE)
			return MQE_UNEXPECTED_CB_ERR;
	}
	gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		return MQE_UNEXPECTED_CB_ERR;
	send_message_queue_interrupt(mqd);
	return MQE_OK;
}

/*
 * Handle a gru_mesq failure. Some of these failures are software recoverable
 * or retryable.
 */
static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	int substatus, ret = 0;

	substatus = gru_get_cb_message_queue_substatus(cb);
	switch (substatus) {
	case CBSS_NO_ERROR:
		STAT(mesq_send_unexpected_error);
		ret = MQE_UNEXPECTED_CB_ERR;
		break;
	case CBSS_LB_OVERFLOWED:
		STAT(mesq_send_lb_overflow);
		ret = MQE_CONGESTION;
		break;
	case CBSS_QLIMIT_REACHED:
		STAT(mesq_send_qlimit_reached);
		ret = send_message_queue_full(cb, mqd, mesg, lines);
		break;
	case CBSS_AMO_NACKED:
		STAT(mesq_send_amo_nacked);
		ret = MQE_CONGESTION;
		break;
	case CBSS_PUT_NACKED:
		STAT(mesq_send_put_nacked);
		ret = send_message_put_nacked(cb, mqd, mesg, lines);
		break;
	case CBSS_PAGE_OVERFLOW:
		STAT(mesq_page_overflow);
		/* fallthru */
	default:
		BUG();
	}
	return ret;
}

/*
 * Send a message to a message queue
 *	mqd	message queue descriptor
 *	mesg	message. must be vaddr within a GSEG
 *	bytes	message size (<= 2 CL)
 */
int gru_send_message_gpa(struct gru_message_queue_desc *mqd, void *mesg,
			unsigned int bytes)
{
	struct message_header *mhdr;
	void *cb;
	void *dsr;
	int istatus, clines, ret;

	STAT(mesq_send);
	BUG_ON(bytes < sizeof(int) || bytes > 2 * GRU_CACHE_LINE_BYTES);

	clines = DIV_ROUND_UP(bytes, GRU_CACHE_LINE_BYTES);
	if (gru_get_cpu_resources(bytes, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	memcpy(dsr, mesg, bytes);
	mhdr = dsr;
	mhdr->present = MQS_FULL;
	mhdr->lines = clines;
	if (clines == 2) {
		mhdr->present2 = get_present2(mhdr);
		restore_present2(mhdr, MQS_FULL);
	}

	do {
		ret = MQE_OK;
		gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), clines, IMA);
		istatus = gru_wait(cb);
		if (istatus != CBS_IDLE)
			ret = send_message_failure(cb, mqd, dsr, clines);
	} while (ret == MQIE_AGAIN);
	gru_free_cpu_resources(cb, dsr);

	if (ret)
		STAT(mesq_send_failed);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_send_message_gpa);

/*
 * Advance the receive pointer for the queue to the next message.
 */
void gru_free_message(struct gru_message_queue_desc *mqd, void *mesg)
{
	struct message_queue *mq = mqd->mq;
	struct message_header *mhdr = mq->next;
	void *next, *pnext;
	int half = -1;
	int lines = mhdr->lines;

	if (lines == 2)
		restore_present2(mhdr, MQS_EMPTY);
	mhdr->present = MQS_EMPTY;

	pnext = mq->next;
	next = pnext + GRU_CACHE_LINE_BYTES * lines;
	if (next == mq->limit) {
		next = mq->start;
		half = 1;
	} else if (pnext < mq->start2 && next >= mq->start2) {
		half = 0;
	}

	if (half >= 0)
		mq->hstatus[half] = 1;
	mq->next = next;
}
EXPORT_SYMBOL_GPL(gru_free_message);

/*
 * Get next message from message queue. Return NULL if no message
 * present. User must call gru_free_message() to move to the next message.
 *	mqd	message queue descriptor
 */
void *gru_get_next_message(struct gru_message_queue_desc *mqd)
{
	struct message_queue *mq = mqd->mq;
	struct message_header *mhdr = mq->next;
	int present = mhdr->present;

	/* skip NOOP messages */
	while (present == MQS_NOOP) {
		gru_free_message(mqd, mhdr);
		mhdr = mq->next;
		present = mhdr->present;
	}

	/* Wait for both halves of 2 line messages */
	if (present == MQS_FULL && mhdr->lines == 2 &&
				get_present2(mhdr) == MQS_EMPTY)
		present = MQS_EMPTY;

	if (!present) {
		STAT(mesq_receive_none);
		return NULL;
	}

	if (mhdr->lines == 2)
		restore_present2(mhdr, mhdr->present2);

	STAT(mesq_receive);
	return mhdr;
}
EXPORT_SYMBOL_GPL(gru_get_next_message);

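/*
 * Typical send/receive usage (sketch only; "mymsg" is a hypothetical
 * message whose first word is a struct message_header):
 *
 *	ret = gru_send_message_gpa(&mqd, &mymsg, sizeof(mymsg));
 *	...
 *	msg = gru_get_next_message(&mqd);
 *	if (msg) {
 *		(... consume the message ...)
 *		gru_free_message(&mqd, msg);
 *	}
 */
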
/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/

/*
 * Load a DW from a global GPA. The GPA can be a memory or MMR address.
 */
int gru_read_gpa(unsigned long *value, unsigned long gpa)
{
	void *cb;
	void *dsr;
	int ret, iaa;

	STAT(read_gpa);
	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	iaa = gpa >> 62;
	gru_vload_phys(cb, gpa, gru_get_tri(dsr), iaa, IMA);
	ret = gru_wait(cb);
	if (ret == CBS_IDLE)
		*value = *(unsigned long *)dsr;
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_read_gpa);

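/*
 * Example use (sketch; "remote_gpa" is a hypothetical global address
 * obtained elsewhere, e.g. from another partition):
 *
 *	unsigned long val;
 *
 *	if (gru_read_gpa(&val, remote_gpa) != CBS_IDLE)
 *		(... handle the GRU exception ...)
 */
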
/*
 * Copy a block of data using the GRU resources
 */
int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
				unsigned int bytes)
{
	void *cb;
	void *dsr;
	int ret;

	STAT(copy_gpa);
	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
		  XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_CL, IMA);
	ret = gru_wait(cb);
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_copy_gpa);

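/*
 * Example use (sketch): copy a buffer between two global addresses, as
 * quicktest3() below does with uv_gpa() of local buffers; dst, src and
 * nbytes are placeholders:
 *
 *	ret = gru_copy_gpa(uv_gpa(dst), uv_gpa(src), nbytes);
 */
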
/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
/* Temp - will delete after we gain confidence in the GRU */

static int quicktest0(unsigned long arg)
{
	unsigned long word0;
	unsigned long word1;
	void *cb;
	void *dsr;
	unsigned long *p;
	int ret = -EIO;

	if (gru_get_cpu_resources(GRU_CACHE_LINE_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	p = dsr;
	word0 = MAGIC;
	word1 = 0;

	gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE) {
		printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 1\n", smp_processor_id());
		goto done;
	}

	if (*p != MAGIC) {
		printk(KERN_DEBUG "GRU:%d quicktest0 bad magic 0x%lx\n", smp_processor_id(), *p);
		goto done;
	}
	gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE) {
		printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 2\n", smp_processor_id());
		goto done;
	}

	if (word0 != word1 || word1 != MAGIC) {
		printk(KERN_DEBUG
		       "GRU:%d quicktest0 err: found 0x%lx, expected 0x%lx\n",
		       smp_processor_id(), word1, MAGIC);
		goto done;
	}
	ret = 0;

done:
	gru_free_cpu_resources(cb, dsr);
	return ret;
}

#define ALIGNUP(p, q)	((void *)(((unsigned long)(p) + (q) - 1) & ~(q - 1)))

static int quicktest1(unsigned long arg)
{
	struct gru_message_queue_desc mqd;
	void *p, *mq;
	unsigned long *dw;
	int i, ret = -EIO;
	char mes[GRU_CACHE_LINE_BYTES], *m;

	/* Need a 1K cacheline-aligned area that does not cross a page boundary */
	p = kmalloc(4096, GFP_KERNEL);
	if (p == NULL)
		return -ENOMEM;
	mq = ALIGNUP(p, 1024);
	memset(mes, 0xee, sizeof(mes));
	dw = mq;

	gru_create_message_queue(&mqd, mq, 8 * GRU_CACHE_LINE_BYTES, 0, 0, 0);
	for (i = 0; i < 6; i++) {
		mes[8] = i;
		do {
			ret = gru_send_message_gpa(&mqd, mes, sizeof(mes));
		} while (ret == MQE_CONGESTION);
		if (ret)
			break;
	}
	if (ret != MQE_QUEUE_FULL || i != 4) {
		printk(KERN_DEBUG "GRU:%d quicktest1: unexpected status %d, i %d\n",
		       smp_processor_id(), ret, i);
		goto done;
	}

	for (i = 0; i < 6; i++) {
		m = gru_get_next_message(&mqd);
		if (!m || m[8] != i)
			break;
		gru_free_message(&mqd, m);
	}
	if (i != 4) {
		printk(KERN_DEBUG "GRU:%d quicktest1: bad message, i %d, m %p, m8 %d\n",
		       smp_processor_id(), i, m, m ? m[8] : -1);
		goto done;
	}
	ret = 0;

done:
	kfree(p);
	return ret;
}

static int quicktest2(unsigned long arg)
{
	static DECLARE_COMPLETION(cmp);
	unsigned long han;
	int blade_id = 0;
	int numcb = 4;
	int ret = 0;
	unsigned long *buf;
	void *cb0, *cb;
	struct gru_control_block_status *gen;
	int i, k, istatus, bytes;

	bytes = numcb * 4 * 8;
	buf = kmalloc(bytes, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = -EBUSY;
	han = gru_reserve_async_resources(blade_id, numcb, 0, &cmp);
	if (!han)
		goto done;

	gru_lock_async_resource(han, &cb0, NULL);
	memset(buf, 0xee, bytes);
	for (i = 0; i < numcb; i++)
		gru_vset(cb0 + i * GRU_HANDLE_STRIDE, uv_gpa(&buf[i * 4]), 0,
				XTYPE_DW, 4, 1, IMA_INTERRUPT);

	ret = 0;
	k = numcb;
	do {
		gru_wait_async_cbr(han);
		for (i = 0; i < numcb; i++) {
			cb = cb0 + i * GRU_HANDLE_STRIDE;
			istatus = gru_check_status(cb);
			if (istatus != CBS_ACTIVE && istatus != CBS_CALL_OS)
				break;
		}
		if (i == numcb)
			continue;
		if (istatus != CBS_IDLE) {
			printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, exception\n", smp_processor_id(), i);
			ret = -EFAULT;
		} else if (buf[4 * i] || buf[4 * i + 1] || buf[4 * i + 2] ||
				buf[4 * i + 3]) {
			printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, buf 0x%lx, 0x%lx, 0x%lx, 0x%lx\n",
			       smp_processor_id(), i, buf[4 * i], buf[4 * i + 1], buf[4 * i + 2], buf[4 * i + 3]);
			ret = -EIO;
		}
		k--;
		gen = cb;
		gen->istatus = CBS_CALL_OS; /* don't handle this CBR again */
	} while (k);
	BUG_ON(cmp.done);

	gru_unlock_async_resource(han);
	gru_release_async_resources(han);
done:
	kfree(buf);
	return ret;
}

#define BUFSIZE 200
static int quicktest3(unsigned long arg)
{
	char buf1[BUFSIZE], buf2[BUFSIZE];
	int ret = 0;

	memset(buf2, 0, sizeof(buf2));
	memset(buf1, get_cycles() & 255, sizeof(buf1));
	gru_copy_gpa(uv_gpa(buf2), uv_gpa(buf1), BUFSIZE);
	if (memcmp(buf1, buf2, BUFSIZE)) {
		printk(KERN_DEBUG "GRU:%d quicktest3 error\n", smp_processor_id());
		ret = -EIO;
	}
	return ret;
}

/*
 * Debugging only. User hook for various kernel tests
 * of driver & gru.
 */
int gru_ktest(unsigned long arg)
{
	int ret = -EINVAL;

	switch (arg & 0xff) {
	case 0:
		ret = quicktest0(arg);
		break;
	case 1:
		ret = quicktest1(arg);
		break;
	case 2:
		ret = quicktest2(arg);
		break;
	case 3:
		ret = quicktest3(arg);
		break;
	case 99:
		ret = gru_free_kernel_contexts();
		break;
	}
	return ret;
}

int gru_kservices_init(void)
{
	return 0;
}

void gru_kservices_exit(void)
{
	if (gru_free_kernel_contexts())
		BUG();
}